aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStuart Mackie <wsmackie@juniper.net>2017-03-23 06:19:54 -0700
committerStuart Mackie <wsmackie@juniper.net>2017-03-23 06:19:54 -0700
commit88df88a19674ccc0017836941b8ee32eaadf19fb (patch)
treef930c90f75846ec8d8e33cf27325ff8fafc85d5c
parent9f50a40437477432a21b326b15c343ca6b8fe516 (diff)
Deleted charms with wrong license. Will source them differently in future.
Change-Id: I0fc99ea03c6b6ca4701e63793cb2be60e56c7588 Signed-off-by: Stuart Mackie <wsmackie@juniper.net>
-rw-r--r--charms/trusty/cassandra/.bzr/README3
-rw-r--r--charms/trusty/cassandra/.bzr/branch-format1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/branch.conf1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/format1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/last-revision1
-rw-r--r--charms/trusty/cassandra/.bzr/branch/tags0
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/dirstatebin21533 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/format1
-rw-r--r--charms/trusty/cassandra/.bzr/checkout/views0
-rw-r--r--charms/trusty/cassandra/.bzr/repository/format1
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cixbin26592 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iixbin27223 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rixbin27284 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.sixbin14814 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tixbin79199 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/pack-namesbin149 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.packbin641193 -> 0 bytes
-rw-r--r--charms/trusty/cassandra/.bzrignore9
-rw-r--r--charms/trusty/cassandra/Makefile224
-rw-r--r--charms/trusty/cassandra/README.md179
-rw-r--r--charms/trusty/cassandra/README.storage41
-rw-r--r--charms/trusty/cassandra/charm-helpers.yaml27
-rw-r--r--charms/trusty/cassandra/config.yaml316
-rw-r--r--charms/trusty/cassandra/copyright19
-rw-r--r--charms/trusty/cassandra/files/check_cassandra_heap.sh36
-rw-r--r--charms/trusty/cassandra/hooks/actions.py990
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py126
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py398
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py175
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py318
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py40
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/coordinator.py607
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py1026
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/host.py695
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py71
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py292
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/templating.py81
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py68
-rw-r--r--charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py70
-rwxr-xr-xcharms/trusty/cassandra/hooks/cluster-relation-changed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/cluster-relation-departed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/config-changed20
-rw-r--r--charms/trusty/cassandra/hooks/coordinator.py35
-rwxr-xr-xcharms/trusty/cassandra/hooks/data-relation-changed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/data-relation-departed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/database-admin-relation-changed20
-rwxr-xr-xcharms/trusty/cassandra/hooks/database-relation-changed20
-rw-r--r--charms/trusty/cassandra/hooks/definitions.py127
-rw-r--r--charms/trusty/cassandra/hooks/helpers.py1084
-rw-r--r--charms/trusty/cassandra/hooks/hooks.py61
-rwxr-xr-xcharms/trusty/cassandra/hooks/install20
-rwxr-xr-xcharms/trusty/cassandra/hooks/leader-elected20
-rwxr-xr-xcharms/trusty/cassandra/hooks/leader-settings-changed20
-rw-r--r--charms/trusty/cassandra/hooks/loglog.py42
-rwxr-xr-xcharms/trusty/cassandra/hooks/nrpe-external-master-relation-changed20
-rw-r--r--charms/trusty/cassandra/hooks/relations.py139
-rwxr-xr-xcharms/trusty/cassandra/hooks/stop20
-rwxr-xr-xcharms/trusty/cassandra/hooks/upgrade-charm20
-rw-r--r--charms/trusty/cassandra/icon.svg650
-rw-r--r--charms/trusty/cassandra/lib/apache.key53
-rw-r--r--charms/trusty/cassandra/lib/datastax.key49
-rwxr-xr-xcharms/trusty/cassandra/lib/juju-deployer-wrapper.py15
-rwxr-xr-xcharms/trusty/cassandra/lib/testcharms/empty/hooks/install6
-rw-r--r--charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml11
-rw-r--r--charms/trusty/cassandra/metadata.yaml38
-rwxr-xr-xcharms/trusty/cassandra/scripts/volume-common.sh220
-rw-r--r--charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl6
-rw-r--r--charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl6
-rw-r--r--charms/trusty/cassandra/templates/nrpe_service_file.tmpl10
-rw-r--r--charms/trusty/cassandra/testing/__init__.py15
-rw-r--r--charms/trusty/cassandra/testing/amuletfixture.py234
-rw-r--r--charms/trusty/cassandra/testing/mocks.py182
-rw-r--r--charms/trusty/cassandra/tests/__init__.py15
-rwxr-xr-xcharms/trusty/cassandra/tests/base.py43
-rwxr-xr-xcharms/trusty/cassandra/tests/test_actions.py1156
-rwxr-xr-xcharms/trusty/cassandra/tests/test_definitions.py104
-rwxr-xr-xcharms/trusty/cassandra/tests/test_helpers.py1466
-rwxr-xr-xcharms/trusty/cassandra/tests/test_integration.py620
-rw-r--r--charms/trusty/cassandra/tests/tests.yaml15
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/README3
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/branch-format1
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/branch/format1
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/branch/location1
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/checkout/dirstatebin39124 -> 0 bytes
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/checkout/format1
-rw-r--r--charms/trusty/ceilometer-agent/.bzr/checkout/views0
-rw-r--r--charms/trusty/ceilometer-agent/.coveragerc6
-rw-r--r--charms/trusty/ceilometer-agent/.gitignore9
-rw-r--r--charms/trusty/ceilometer-agent/.gitreview5
-rw-r--r--charms/trusty/ceilometer-agent/.project17
-rw-r--r--charms/trusty/ceilometer-agent/.pydevproject9
-rw-r--r--charms/trusty/ceilometer-agent/.testr.conf8
-rw-r--r--charms/trusty/ceilometer-agent/LICENSE202
-rw-r--r--charms/trusty/ceilometer-agent/Makefile26
-rw-r--r--charms/trusty/ceilometer-agent/README.md7
-rw-r--r--charms/trusty/ceilometer-agent/actions.yaml6
-rwxr-xr-xcharms/trusty/ceilometer-agent/actions/actions.py62
l---------charms/trusty/ceilometer-agent/actions/openstack-upgrade1
-rwxr-xr-xcharms/trusty/ceilometer-agent/actions/openstack_upgrade.py52
l---------charms/trusty/ceilometer-agent/actions/pause1
l---------charms/trusty/ceilometer-agent/actions/resume1
-rw-r--r--charms/trusty/ceilometer-agent/charm-helpers-hooks.yaml12
-rw-r--r--charms/trusty/ceilometer-agent/charm-helpers-tests.yaml5
-rw-r--r--charms/trusty/ceilometer-agent/config.yaml59
-rw-r--r--charms/trusty/ceilometer-agent/copyright16
l---------charms/trusty/ceilometer-agent/hooks/ceilometer-service-relation-changed1
-rw-r--r--charms/trusty/ceilometer-agent/hooks/ceilometer_contexts.py73
-rwxr-xr-xcharms/trusty/ceilometer-agent/hooks/ceilometer_hooks.py106
-rw-r--r--charms/trusty/ceilometer-agent/hooks/ceilometer_utils.py219
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/__init__.py36
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/__init__.py189
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/benchmark.py34
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/commands.py30
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/hookenv.py21
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/host.py29
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/unitdata.py37
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/nrpe.py396
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/volumes.py173
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/apache.py95
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/cluster.py363
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/ip.py497
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py31
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py295
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py1010
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py1508
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py21
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py16
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py128
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py179
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py382
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py16
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py321
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py1891
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/packages.py147
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/ceph.py1333
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/loopback.py86
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/lvm.py103
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/utils.py69
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/decorators.py55
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/files.py43
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/fstab.py132
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hookenv.py1007
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/host.py765
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/kernel.py66
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/__init__.py16
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/base.py351
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/helpers.py290
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/strutils.py70
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/sysctl.py54
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/templating.py84
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/core/unitdata.py518
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/__init__.py467
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/archiveurl.py165
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/bzrurl.py75
-rw-r--r--charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/giturl.py68
l---------charms/trusty/ceilometer-agent/hooks/config-changed1
l---------charms/trusty/ceilometer-agent/hooks/install1
l---------charms/trusty/ceilometer-agent/hooks/nova-ceilometer-relation-joined1
l---------charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-changed1
l---------charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-joined1
l---------charms/trusty/ceilometer-agent/hooks/start1
l---------charms/trusty/ceilometer-agent/hooks/stop1
l---------charms/trusty/ceilometer-agent/hooks/upgrade-charm1
-rw-r--r--charms/trusty/ceilometer-agent/icon.svg717
-rw-r--r--charms/trusty/ceilometer-agent/local.yaml3
-rw-r--r--charms/trusty/ceilometer-agent/metadata.yaml31
-rw-r--r--charms/trusty/ceilometer-agent/requirements.txt11
-rw-r--r--charms/trusty/ceilometer-agent/revision1
-rw-r--r--charms/trusty/ceilometer-agent/setup.cfg6
-rw-r--r--charms/trusty/ceilometer-agent/templates/icehouse/ceilometer.conf33
-rw-r--r--charms/trusty/ceilometer-agent/templates/parts/rabbitmq21
-rw-r--r--charms/trusty/ceilometer-agent/test-requirements.txt25
-rw-r--r--charms/trusty/ceilometer-agent/tests/README.md9
-rw-r--r--charms/trusty/ceilometer-agent/tests/basic_deployment.py678
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/__init__.py36
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/deployment.py97
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/utils.py827
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/__init__.py13
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/deployment.py295
-rw-r--r--charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/utils.py1010
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/dev-basic-xenial-newton25
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/dev-basic-yakkety-newton23
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/gate-basic-precise-icehouse25
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/gate-basic-trusty-icehouse23
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/gate-basic-trusty-kilo25
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/gate-basic-trusty-liberty25
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/gate-basic-trusty-mitaka25
-rwxr-xr-xcharms/trusty/ceilometer-agent/tests/gate-basic-xenial-mitaka23
-rw-r--r--charms/trusty/ceilometer-agent/tests/tests.yaml17
-rw-r--r--charms/trusty/ceilometer-agent/tox.ini75
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/__init__.py17
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/test_actions.py78
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/test_actions_openstack_upgrade.py69
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_contexts.py59
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_hooks.py127
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_utils.py116
-rw-r--r--charms/trusty/ceilometer-agent/unit_tests/test_utils.py128
-rw-r--r--charms/trusty/ceilometer-contrail/.bzrignore1
-rw-r--r--charms/trusty/ceilometer-contrail/Makefile10
-rw-r--r--charms/trusty/ceilometer-contrail/README.md32
-rw-r--r--charms/trusty/ceilometer-contrail/charm-helpers-sync.yaml5
-rw-r--r--charms/trusty/ceilometer-contrail/config.yaml7
-rw-r--r--charms/trusty/ceilometer-contrail/copyright17
l---------charms/trusty/ceilometer-contrail/hooks/ceilometer-plugin-relation-joined1
-rwxr-xr-xcharms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_hooks.py106
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_utils.py25
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hookenv.py1009
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/host.py714
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hugepage.py71
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/helpers.py292
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/templating.py81
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py.orig472
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/bzrurl.py68
-rw-r--r--charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/giturl.py70
l---------charms/trusty/ceilometer-contrail/hooks/config-changed1
l---------charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-broken1
l---------charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-changed1
l---------charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-departed1
l---------charms/trusty/ceilometer-contrail/hooks/install1
l---------charms/trusty/ceilometer-contrail/hooks/start1
l---------charms/trusty/ceilometer-contrail/hooks/stop1
l---------charms/trusty/ceilometer-contrail/hooks/upgrade-charm1
-rw-r--r--charms/trusty/ceilometer-contrail/icon.svg309
-rw-r--r--charms/trusty/ceilometer-contrail/metadata.yaml25
-rw-r--r--charms/trusty/ceilometer/.bzr/README3
-rw-r--r--charms/trusty/ceilometer/.bzr/branch-format1
-rw-r--r--charms/trusty/ceilometer/.bzr/branch/format1
-rw-r--r--charms/trusty/ceilometer/.bzr/branch/location1
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/dirstatebin70502 -> 0 bytes
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/format1
-rw-r--r--charms/trusty/ceilometer/.bzr/checkout/views0
-rw-r--r--charms/trusty/ceilometer/.coveragerc6
-rw-r--r--charms/trusty/ceilometer/.gitignore7
-rw-r--r--charms/trusty/ceilometer/.gitreview5
-rw-r--r--charms/trusty/ceilometer/.project17
-rw-r--r--charms/trusty/ceilometer/.pydevproject9
-rw-r--r--charms/trusty/ceilometer/.testr.conf8
-rw-r--r--charms/trusty/ceilometer/Makefile31
-rw-r--r--charms/trusty/ceilometer/README.md64
-rw-r--r--charms/trusty/ceilometer/actions.yaml6
-rwxr-xr-xcharms/trusty/ceilometer/actions/actions.py48
l---------charms/trusty/ceilometer/actions/ceilometer_contexts.py1
l---------charms/trusty/ceilometer/actions/ceilometer_hooks.py1
l---------charms/trusty/ceilometer/actions/ceilometer_utils.py1
l---------charms/trusty/ceilometer/actions/charmhelpers1
l---------charms/trusty/ceilometer/actions/openstack-upgrade1
-rwxr-xr-xcharms/trusty/ceilometer/actions/openstack_upgrade.py37
l---------charms/trusty/ceilometer/actions/pause1
l---------charms/trusty/ceilometer/actions/resume1
-rw-r--r--charms/trusty/ceilometer/charm-helpers-hooks.yaml15
-rw-r--r--charms/trusty/ceilometer/charm-helpers-tests.yaml5
-rw-r--r--charms/trusty/ceilometer/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/__init__.py191
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/benchmark.py36
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/commands.py32
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/hookenv.py23
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/host.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/cli/unitdata.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py398
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py175
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py82
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py316
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py100
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py63
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py100
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py105
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py552
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml13
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema9
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml38
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml67
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema42
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml49
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema42
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py84
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py50
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py55
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py67
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py52
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py134
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py45
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py39
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py131
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py211
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf8
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf7
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs349
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules117
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf11
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh8
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty11
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally214
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py89
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py19
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py31
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py394
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py0
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config70
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config159
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py157
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py499
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py304
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py1012
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py1583
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh34
-rwxr-xr-xcharms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py179
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py384
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf21
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg66
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend26
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf26
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy10
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka12
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py1576
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py269
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py145
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py1206
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py88
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/hookenv.py1009
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/host.py714
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/hugepage.py71
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/services/helpers.py292
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/templating.py81
-rw-r--r--charms/trusty/ceilometer/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/__init__.py464
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py68
-rw-r--r--charms/trusty/ceilometer/charmhelpers/fetch/giturl.py70
-rw-r--r--charms/trusty/ceilometer/charmhelpers/payload/__init__.py17
-rw-r--r--charms/trusty/ceilometer/charmhelpers/payload/execd.py66
-rw-r--r--charms/trusty/ceilometer/config.yaml194
-rw-r--r--charms/trusty/ceilometer/copyright32
-rw-r--r--charms/trusty/ceilometer/hardening.yaml5
l---------charms/trusty/ceilometer/hooks/amqp-relation-changed1
l---------charms/trusty/ceilometer/hooks/amqp-relation-departed1
l---------charms/trusty/ceilometer/hooks/amqp-relation-joined1
l---------charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken1
l---------charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed1
l---------charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed1
l---------charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined1
l---------charms/trusty/ceilometer/hooks/ceilometer_contexts.py1
-rwxr-xr-xcharms/trusty/ceilometer/hooks/ceilometer_hooks.py366
l---------charms/trusty/ceilometer/hooks/ceilometer_utils.py1
l---------charms/trusty/ceilometer/hooks/charmhelpers1
l---------charms/trusty/ceilometer/hooks/cluster-relation-changed1
l---------charms/trusty/ceilometer/hooks/cluster-relation-departed1
l---------charms/trusty/ceilometer/hooks/cluster-relation-joined1
l---------charms/trusty/ceilometer/hooks/config-changed1
l---------charms/trusty/ceilometer/hooks/ha-relation-changed1
l---------charms/trusty/ceilometer/hooks/ha-relation-joined1
l---------charms/trusty/ceilometer/hooks/identity-notifications-relation-changed1
l---------charms/trusty/ceilometer/hooks/identity-service-relation-changed1
l---------charms/trusty/ceilometer/hooks/identity-service-relation-joined1
-rwxr-xr-xcharms/trusty/ceilometer/hooks/install20
l---------charms/trusty/ceilometer/hooks/install.real1
l---------charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed1
l---------charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined1
l---------charms/trusty/ceilometer/hooks/shared-db-relation-changed1
l---------charms/trusty/ceilometer/hooks/shared-db-relation-departed1
l---------charms/trusty/ceilometer/hooks/shared-db-relation-joined1
l---------charms/trusty/ceilometer/hooks/start1
l---------charms/trusty/ceilometer/hooks/stop1
l---------charms/trusty/ceilometer/hooks/update-status1
l---------charms/trusty/ceilometer/hooks/upgrade-charm1
-rw-r--r--charms/trusty/ceilometer/icon.svg717
-rw-r--r--charms/trusty/ceilometer/lib/ceilometer_contexts.py122
-rw-r--r--charms/trusty/ceilometer/lib/ceilometer_utils.py391
-rw-r--r--charms/trusty/ceilometer/metadata.yaml42
-rwxr-xr-xcharms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central345
-rw-r--r--charms/trusty/ceilometer/requirements.txt11
-rw-r--r--charms/trusty/ceilometer/revision1
-rw-r--r--charms/trusty/ceilometer/setup.cfg6
-rw-r--r--charms/trusty/ceilometer/templates/icehouse/ceilometer.conf42
-rw-r--r--charms/trusty/ceilometer/templates/kilo/ceilometer.conf43
-rw-r--r--charms/trusty/ceilometer/templates/mitaka/ceilometer.conf42
-rw-r--r--charms/trusty/ceilometer/templates/parts/rabbitmq21
-rw-r--r--charms/trusty/ceilometer/test-requirements.txt9
-rwxr-xr-xcharms/trusty/ceilometer/tests/014-basic-precise-icehouse11
-rwxr-xr-xcharms/trusty/ceilometer/tests/015-basic-trusty-icehouse9
-rwxr-xr-xcharms/trusty/ceilometer/tests/016-basic-trusty-juno11
-rwxr-xr-xcharms/trusty/ceilometer/tests/017-basic-trusty-kilo11
-rwxr-xr-xcharms/trusty/ceilometer/tests/018-basic-trusty-liberty11
-rwxr-xr-xcharms/trusty/ceilometer/tests/019-basic-trusty-mitaka11
-rwxr-xr-xcharms/trusty/ceilometer/tests/020-basic-wily-liberty9
-rwxr-xr-xcharms/trusty/ceilometer/tests/021-basic-xenial-mitaka9
-rw-r--r--charms/trusty/ceilometer/tests/README113
-rw-r--r--charms/trusty/ceilometer/tests/basic_deployment.py664
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py95
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py829
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py304
-rw-r--r--charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py1012
-rwxr-xr-xcharms/trusty/ceilometer/tests/setup/00-setup18
-rw-r--r--charms/trusty/ceilometer/tests/tests.yaml21
-rw-r--r--charms/trusty/ceilometer/tox.ini29
-rw-r--r--charms/trusty/ceilometer/unit_tests/__init__.py3
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_actions.py64
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py67
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py164
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py370
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py180
-rw-r--r--charms/trusty/ceilometer/unit_tests/test_utils.py111
-rw-r--r--charms/trusty/contrail-analytics/.bzrignore1
-rw-r--r--charms/trusty/contrail-analytics/Makefile10
-rw-r--r--charms/trusty/contrail-analytics/README.md49
-rw-r--r--charms/trusty/contrail-analytics/charm-helpers-sync.yaml9
-rw-r--r--charms/trusty/contrail-analytics/config.yaml34
-rw-r--r--charms/trusty/contrail-analytics/copyright17
-rw-r--r--charms/trusty/contrail-analytics/files/contrail5
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-analytics-api.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr6
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-collector.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini6
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini13
-rw-r--r--charms/trusty/contrail-analytics/files/contrail-topology.ini13
-rwxr-xr-xcharms/trusty/contrail-analytics/files/ntpq-nodemgr10
l---------charms/trusty/contrail-analytics/hooks/cassandra-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/cassandra-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/cassandra-relation-departed1
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py456
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py96
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py318
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py197
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py963
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py1416
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh32
-rwxr-xr-xcharms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py151
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py356
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg58
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend24
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf24
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken9
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py926
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py56
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py121
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py58
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py34
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py657
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py78
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py71
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py73
l---------charms/trusty/contrail-analytics/hooks/config-changed1
l---------charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined1
l---------charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed1
l---------charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed1
-rwxr-xr-xcharms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py305
-rw-r--r--charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py318
l---------charms/trusty/contrail-analytics/hooks/http-services-relation-joined1
l---------charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed1
l---------charms/trusty/contrail-analytics/hooks/install1
l---------charms/trusty/contrail-analytics/hooks/kafka-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/kafka-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/kafka-relation-departed1
l---------charms/trusty/contrail-analytics/hooks/start1
l---------charms/trusty/contrail-analytics/hooks/stop1
l---------charms/trusty/contrail-analytics/hooks/upgrade-charm1
l---------charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken1
l---------charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed1
l---------charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed1
-rw-r--r--charms/trusty/contrail-analytics/icon.svg309
-rw-r--r--charms/trusty/contrail-analytics/metadata.yaml28
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf22
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf26
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf9
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-collector.conf31
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf11
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-query-engine.conf24
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf28
-rw-r--r--charms/trusty/contrail-analytics/templates/contrail-topology.conf19
-rw-r--r--charms/trusty/contrail-analytics/templates/vnc_api_lib.ini16
-rw-r--r--charms/trusty/contrail-configuration/.bzrignore1
-rw-r--r--charms/trusty/contrail-configuration/Makefile10
-rw-r--r--charms/trusty/contrail-configuration/README.md111
-rw-r--r--charms/trusty/contrail-configuration/charm-helpers-sync.yaml9
-rw-r--r--charms/trusty/contrail-configuration/config.yaml48
-rw-r--r--charms/trusty/contrail-configuration/copyright17
-rw-r--r--charms/trusty/contrail-configuration/files/contrail-config-nodemgr6
-rw-r--r--charms/trusty/contrail-configuration/files/contrail-nodemgr-config.ini6
-rw-r--r--charms/trusty/contrail-configuration/files/ifmap6
-rw-r--r--charms/trusty/contrail-configuration/files/ifmap.ini12
-rwxr-xr-xcharms/trusty/contrail-configuration/files/ntpq-nodemgr10
l---------charms/trusty/contrail-configuration/hooks/amqp-relation-broken1
l---------charms/trusty/contrail-configuration/hooks/amqp-relation-changed1
l---------charms/trusty/contrail-configuration/hooks/amqp-relation-departed1
l---------charms/trusty/contrail-configuration/hooks/amqp-relation-joined1
l---------charms/trusty/contrail-configuration/hooks/cassandra-relation-broken1
l---------charms/trusty/contrail-configuration/hooks/cassandra-relation-changed1
l---------charms/trusty/contrail-configuration/hooks/cassandra-relation-departed1
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ip.py456
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ovs/__init__.py96
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ufw.py318
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/deployment.py197
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/utils.py963
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/context.py1416
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh32
-rwxr-xr-xcharms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/ip.py151
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/neutron.py356
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/ceph.conf15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg58
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend24
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf24
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken9
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/utils.py926
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/debug.py56
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/packages.py121
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/rpdb.py58
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/version.py34
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/ceph.py657
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/loopback.py78
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/utils.py71
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/giturl.py73
l---------charms/trusty/contrail-configuration/hooks/config-changed1
l---------charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-broken1
l---------charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-changed1
l---------charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-departed1
l---------charms/trusty/contrail-configuration/hooks/contrail-api-relation-joined1
l---------charms/trusty/contrail-configuration/hooks/contrail-discovery-relation-joined1
l---------charms/trusty/contrail-configuration/hooks/contrail-ifmap-relation-joined1
-rwxr-xr-xcharms/trusty/contrail-configuration/hooks/contrail_configuration_hooks.py478
-rw-r--r--charms/trusty/contrail-configuration/hooks/contrail_configuration_utils.py503
l---------charms/trusty/contrail-configuration/hooks/http-services-relation-joined1
l---------charms/trusty/contrail-configuration/hooks/identity-admin-relation-broken1
l---------charms/trusty/contrail-configuration/hooks/identity-admin-relation-changed1
l---------charms/trusty/contrail-configuration/hooks/identity-admin-relation-departed1
l---------charms/trusty/contrail-configuration/hooks/install1
l---------charms/trusty/contrail-configuration/hooks/leader-settings-changed1
l---------charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-broken1
l---------charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-changed1
l---------charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-departed1
l---------charms/trusty/contrail-configuration/hooks/start1
l---------charms/trusty/contrail-configuration/hooks/stop1
l---------charms/trusty/contrail-configuration/hooks/upgrade-charm1
l---------charms/trusty/contrail-configuration/hooks/zookeeper-relation-broken1
l---------charms/trusty/contrail-configuration/hooks/zookeeper-relation-changed1
l---------charms/trusty/contrail-configuration/hooks/zookeeper-relation-departed1
-rw-r--r--charms/trusty/contrail-configuration/icon.svg309
-rw-r--r--charms/trusty/contrail-configuration/metadata.yaml35
-rwxr-xr-xcharms/trusty/contrail-configuration/scripts/deactivate_floating_pool.py44
-rwxr-xr-xcharms/trusty/contrail-configuration/scripts/delete_floating_pool.py44
-rw-r--r--charms/trusty/contrail-configuration/templates/basicauthusers.properties25
-rw-r--r--charms/trusty/contrail-configuration/templates/contrail-api.conf30
-rw-r--r--charms/trusty/contrail-configuration/templates/contrail-barbican-auth.conf13
-rw-r--r--charms/trusty/contrail-configuration/templates/contrail-config-nodemgr.conf9
-rw-r--r--charms/trusty/contrail-configuration/templates/contrail-device-manager.conf25
-rw-r--r--charms/trusty/contrail-configuration/templates/contrail-schema.conf24
-rw-r--r--charms/trusty/contrail-configuration/templates/contrail-svc-monitor.conf35
-rw-r--r--charms/trusty/contrail-configuration/templates/discovery.conf18
-rw-r--r--charms/trusty/contrail-configuration/templates/vnc_api_lib.ini16
-rw-r--r--charms/trusty/contrail-control/.bzrignore1
-rw-r--r--charms/trusty/contrail-control/Makefile10
-rw-r--r--charms/trusty/contrail-control/README.md46
-rw-r--r--charms/trusty/contrail-control/charm-helpers-sync.yaml9
-rw-r--r--charms/trusty/contrail-control/config.yaml21
-rw-r--r--charms/trusty/contrail-control/copyright17
-rw-r--r--charms/trusty/contrail-control/files/contrail-control-nodemgr6
-rw-r--r--charms/trusty/contrail-control/files/contrail-nodemgr-control.ini6
-rwxr-xr-xcharms/trusty/contrail-control/files/ntpq-nodemgr10
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ip.py456
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ovs/__init__.py96
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ufw.py318
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/alternatives.py33
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/deployment.py197
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/utils.py963
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/context.py1416
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/__init__.py18
-rwxr-xr-xcharms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh32
-rwxr-xr-xcharms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh30
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/ip.py151
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/neutron.py356
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/__init__.py18
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/ceph.conf15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/git.upstart17
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg58
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend24
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf24
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken9
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo22
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-zeromq14
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templating.py323
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/utils.py926
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/debug.py56
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/packages.py121
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/rpdb.py58
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/version.py34
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/ceph.py657
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/loopback.py78
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/lvm.py105
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/utils.py71
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/contrail-control/hooks/charmhelpers/fetch/giturl.py73
l---------charms/trusty/contrail-control/hooks/config-changed1
l---------charms/trusty/contrail-control/hooks/contrail-api-relation-broken1
l---------charms/trusty/contrail-control/hooks/contrail-api-relation-changed1
l---------charms/trusty/contrail-control/hooks/contrail-api-relation-departed1
l---------charms/trusty/contrail-control/hooks/contrail-discovery-relation-broken1
l---------charms/trusty/contrail-control/hooks/contrail-discovery-relation-changed1
l---------charms/trusty/contrail-control/hooks/contrail-discovery-relation-departed1
l---------charms/trusty/contrail-control/hooks/contrail-ifmap-relation-broken1
l---------charms/trusty/contrail-control/hooks/contrail-ifmap-relation-changed1
l---------charms/trusty/contrail-control/hooks/contrail-ifmap-relation-departed1
-rwxr-xr-xcharms/trusty/contrail-control/hooks/contrail_control_hooks.py174
-rw-r--r--charms/trusty/contrail-control/hooks/contrail_control_utils.py249
l---------charms/trusty/contrail-control/hooks/identity-admin-relation-broken1
l---------charms/trusty/contrail-control/hooks/identity-admin-relation-changed1
l---------charms/trusty/contrail-control/hooks/identity-admin-relation-departed1
l---------charms/trusty/contrail-control/hooks/install1
l---------charms/trusty/contrail-control/hooks/start1
l---------charms/trusty/contrail-control/hooks/stop1
l---------charms/trusty/contrail-control/hooks/upgrade-charm1
-rw-r--r--charms/trusty/contrail-control/icon.svg309
-rw-r--r--charms/trusty/contrail-control/metadata.yaml22
-rw-r--r--charms/trusty/contrail-control/templates/contrail-control-nodemgr.conf9
-rw-r--r--charms/trusty/contrail-control/templates/control-node.conf16
-rw-r--r--charms/trusty/contrail-control/templates/vnc_api_lib.ini16
-rw-r--r--charms/trusty/contrail-webui/.bzrignore4
-rw-r--r--charms/trusty/contrail-webui/.project17
-rw-r--r--charms/trusty/contrail-webui/.pydevproject9
-rw-r--r--charms/trusty/contrail-webui/Makefile10
-rw-r--r--charms/trusty/contrail-webui/README.md72
-rw-r--r--charms/trusty/contrail-webui/charm-helpers-sync.yaml5
-rw-r--r--charms/trusty/contrail-webui/config.yaml52
-rw-r--r--charms/trusty/contrail-webui/copyright17
-rw-r--r--charms/trusty/contrail-webui/files/40contrail4
-rw-r--r--charms/trusty/contrail-webui/files/contrail-webui6
-rw-r--r--charms/trusty/contrail-webui/files/contrail-webui-contrail.ini14
-rw-r--r--charms/trusty/contrail-webui/files/contrail-webui-middleware6
-rw-r--r--charms/trusty/contrail-webui/files/contrail-webui-middleware-contrail.ini14
-rw-r--r--charms/trusty/contrail-webui/files/contrail-webui-middleware-opencontrail.ini14
-rw-r--r--charms/trusty/contrail-webui/files/contrail-webui-opencontrail.ini14
-rw-r--r--charms/trusty/contrail-webui/files/supervisor-webui.conf36
-rw-r--r--charms/trusty/contrail-webui/files/supervisord_webui.conf140
-rw-r--r--charms/trusty/contrail-webui/hooks/actions.py5
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/cassandra-relation-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/cassandra-relation-joined3
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/contrail-webui/hooks/charmhelpers/fetch/giturl.py73
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/config-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/contrail_api-relation-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/contrail_api-relation-joined3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/contrail_discovery-relation-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/contrail_discovery-relation-joined3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/identity_admin-relation-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/identity_admin-relation-joined3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/install30
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/leader-settings-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/redis-relation-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/redis-relation-joined3
-rw-r--r--charms/trusty/contrail-webui/hooks/services.py210
-rw-r--r--charms/trusty/contrail-webui/hooks/setup.py133
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/start3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/stop3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/upgrade-charm3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/website-relation-changed3
-rwxr-xr-xcharms/trusty/contrail-webui/hooks/website-relation-joined3
-rw-r--r--charms/trusty/contrail-webui/icon.svg309
-rw-r--r--charms/trusty/contrail-webui/metadata.yaml24
-rw-r--r--charms/trusty/contrail-webui/templates/config.global.js.j2315
-rw-r--r--charms/trusty/contrail-webui/templates/contrail-webui-userauth.js7
-rw-r--r--charms/trusty/kafka/.bzr/README3
-rw-r--r--charms/trusty/kafka/.bzr/branch-format1
-rw-r--r--charms/trusty/kafka/.bzr/branch/format1
-rw-r--r--charms/trusty/kafka/.bzr/branch/location1
-rw-r--r--charms/trusty/kafka/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/kafka/.bzr/checkout/dirstatebin10605 -> 0 bytes
-rw-r--r--charms/trusty/kafka/.bzr/checkout/format1
-rw-r--r--charms/trusty/kafka/.bzr/checkout/views0
-rw-r--r--charms/trusty/kafka/LICENSE177
-rw-r--r--charms/trusty/kafka/README.md84
-rw-r--r--charms/trusty/kafka/actions.yaml48
-rwxr-xr-xcharms/trusty/kafka/actions/create-topic40
-rwxr-xr-xcharms/trusty/kafka/actions/delete-topic36
-rwxr-xr-xcharms/trusty/kafka/actions/list-topics31
-rwxr-xr-xcharms/trusty/kafka/actions/list-zks28
-rwxr-xr-xcharms/trusty/kafka/actions/read-topic35
-rwxr-xr-xcharms/trusty/kafka/actions/write-topic36
-rw-r--r--charms/trusty/kafka/config.yaml7
-rw-r--r--charms/trusty/kafka/copyright16
-rw-r--r--charms/trusty/kafka/dist.yaml30
-rw-r--r--charms/trusty/kafka/hooks/callbacks.py181
-rwxr-xr-xcharms/trusty/kafka/hooks/common.py90
-rwxr-xr-xcharms/trusty/kafka/hooks/config-changed15
-rwxr-xr-xcharms/trusty/kafka/hooks/install17
-rwxr-xr-xcharms/trusty/kafka/hooks/kafka-relation-changed15
-rw-r--r--charms/trusty/kafka/hooks/setup.py33
-rwxr-xr-xcharms/trusty/kafka/hooks/start15
-rwxr-xr-xcharms/trusty/kafka/hooks/stop15
-rwxr-xr-xcharms/trusty/kafka/hooks/zookeeper-relation-changed15
-rwxr-xr-xcharms/trusty/kafka/hooks/zookeeper-relation-departed15
-rw-r--r--charms/trusty/kafka/icon.svg90
-rw-r--r--charms/trusty/kafka/metadata.yaml30
-rw-r--r--charms/trusty/kafka/resources.yaml12
-rw-r--r--charms/trusty/kafka/resources/python/PyYAML-3.11.tar.gzbin248685 -> 0 bytes
-rw-r--r--charms/trusty/kafka/resources/python/charmhelpers-0.3.1.tar.gzbin62031 -> 0 bytes
-rw-r--r--charms/trusty/kafka/resources/python/jujuresources-0.2.11.tar.gzbin12679 -> 0 bytes
-rw-r--r--charms/trusty/kafka/resources/python/pyaml-15.5.7.tar.gzbin14374 -> 0 bytes
-rw-r--r--charms/trusty/kafka/resources/python/six-1.9.0-py2.py3-none-any.whlbin10222 -> 0 bytes
-rw-r--r--charms/trusty/kafka/templates/upstart.conf14
-rwxr-xr-xcharms/trusty/kafka/tests/00-setup5
-rwxr-xr-xcharms/trusty/kafka/tests/100-deploy-kafka29
-rwxr-xr-xcharms/trusty/kafka/tests/remote/test_dist_config.py71
-rw-r--r--charms/trusty/kafka/tests/tests.yaml10
-rw-r--r--charms/trusty/keepalived/.project17
-rw-r--r--charms/trusty/keepalived/.pydevproject8
-rw-r--r--charms/trusty/keepalived/README.md23
-rw-r--r--charms/trusty/keepalived/bin/charm_helpers_sync.py253
-rw-r--r--charms/trusty/keepalived/charm-helpers-sync.yaml5
-rw-r--r--charms/trusty/keepalived/config.yaml11
-rw-r--r--charms/trusty/keepalived/copyright17
-rw-r--r--charms/trusty/keepalived/hooks/actions.py5
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/hookenv.py744
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/host.py468
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/services/base.py350
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/services/helpers.py267
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/strutils.py42
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/core/unitdata.py477
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/fetch/__init__.py439
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/fetch/archiveurl.py161
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/keepalived/hooks/charmhelpers/fetch/giturl.py73
-rwxr-xr-xcharms/trusty/keepalived/hooks/config-changed3
-rwxr-xr-xcharms/trusty/keepalived/hooks/install18
-rwxr-xr-xcharms/trusty/keepalived/hooks/juju_info-relation-joined3
-rwxr-xr-xcharms/trusty/keepalived/hooks/leader-elected3
-rw-r--r--charms/trusty/keepalived/hooks/services.py57
-rw-r--r--charms/trusty/keepalived/hooks/setup.py17
-rwxr-xr-xcharms/trusty/keepalived/hooks/start3
-rwxr-xr-xcharms/trusty/keepalived/hooks/stop3
-rwxr-xr-xcharms/trusty/keepalived/hooks/upgrade-charm3
-rw-r--r--charms/trusty/keepalived/icon.svg293
-rw-r--r--charms/trusty/keepalived/metadata.yaml18
-rw-r--r--charms/trusty/keepalived/templates/50-keepalived.conf3
-rw-r--r--charms/trusty/keepalived/templates/keepalived.conf10
-rwxr-xr-xcharms/trusty/keepalived/tests/00-setup5
-rwxr-xr-xcharms/trusty/keepalived/tests/10-deploy51
-rwxr-xr-xcharms/trusty/keepalived/unit_tests/test_actions.py21
-rw-r--r--charms/trusty/neutron-api-contrail/.bzrignore1
-rw-r--r--charms/trusty/neutron-api-contrail/Makefile10
-rw-r--r--charms/trusty/neutron-api-contrail/README.md40
-rw-r--r--charms/trusty/neutron-api-contrail/charm-helpers-sync.yaml5
-rw-r--r--charms/trusty/neutron-api-contrail/config.yaml10
-rw-r--r--charms/trusty/neutron-api-contrail/copyright17
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/giturl.py73
l---------charms/trusty/neutron-api-contrail/hooks/config-changed1
l---------charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-broken1
l---------charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-changed1
l---------charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-departed1
l---------charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-broken1
l---------charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-changed1
l---------charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-departed1
l---------charms/trusty/neutron-api-contrail/hooks/install1
l---------charms/trusty/neutron-api-contrail/hooks/neutron-plugin-api-subordinate-relation-joined1
-rwxr-xr-xcharms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_hooks.py120
-rw-r--r--charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_utils.py59
l---------charms/trusty/neutron-api-contrail/hooks/start1
l---------charms/trusty/neutron-api-contrail/hooks/stop1
l---------charms/trusty/neutron-api-contrail/hooks/upgrade-charm1
-rw-r--r--charms/trusty/neutron-api-contrail/icon.svg309
-rw-r--r--charms/trusty/neutron-api-contrail/metadata.yaml30
-rw-r--r--charms/trusty/neutron-api-contrail/templates/ContrailPlugin.ini30
-rw-r--r--charms/trusty/neutron-contrail/.bzrignore1
-rw-r--r--charms/trusty/neutron-contrail/Makefile10
-rw-r--r--charms/trusty/neutron-contrail/README.md131
-rw-r--r--charms/trusty/neutron-contrail/charm-helpers-sync.yaml5
-rw-r--r--charms/trusty/neutron-contrail/config.yaml74
-rw-r--r--charms/trusty/neutron-contrail/copyright17
-rw-r--r--charms/trusty/neutron-contrail/files/60-vrouter-vgw.conf3
-rw-r--r--charms/trusty/neutron-contrail/files/contrail-nodemgr-vrouter.ini6
-rw-r--r--charms/trusty/neutron-contrail/files/contrail-vrouter-nodemgr6
-rw-r--r--charms/trusty/neutron-contrail/files/contrail-vrouter.rules3
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/__init__.py38
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/__init__.py15
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/decorators.py57
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/files.py45
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/fstab.py134
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/hookenv.py898
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/host.py586
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/hugepage.py69
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/kernel.py68
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/__init__.py18
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/base.py353
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/helpers.py283
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/strutils.py72
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/sysctl.py56
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/templating.py68
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/core/unitdata.py521
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/__init__.py468
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/archiveurl.py167
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/bzrurl.py78
-rw-r--r--charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/giturl.py73
l---------charms/trusty/neutron-contrail/hooks/config-changed1
l---------charms/trusty/neutron-contrail/hooks/contrail-api-relation-broken1
l---------charms/trusty/neutron-contrail/hooks/contrail-api-relation-changed1
l---------charms/trusty/neutron-contrail/hooks/contrail-api-relation-departed1
l---------charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-broken1
l---------charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-changed1
l---------charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-departed1
l---------charms/trusty/neutron-contrail/hooks/control-node-relation-broken1
l---------charms/trusty/neutron-contrail/hooks/control-node-relation-departed1
l---------charms/trusty/neutron-contrail/hooks/control-node-relation-joined1
l---------charms/trusty/neutron-contrail/hooks/identity-admin-relation-broken1
l---------charms/trusty/neutron-contrail/hooks/identity-admin-relation-changed1
l---------charms/trusty/neutron-contrail/hooks/identity-admin-relation-departed1
l---------charms/trusty/neutron-contrail/hooks/install1
l---------charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-broken1
l---------charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-changed1
l---------charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-departed1
l---------charms/trusty/neutron-contrail/hooks/neutron-plugin-relation-joined1
-rwxr-xr-xcharms/trusty/neutron-contrail/hooks/neutron_contrail_hooks.py353
-rw-r--r--charms/trusty/neutron-contrail/hooks/neutron_contrail_utils.py478
l---------charms/trusty/neutron-contrail/hooks/start1
l---------charms/trusty/neutron-contrail/hooks/stop1
l---------charms/trusty/neutron-contrail/hooks/upgrade-charm1
-rw-r--r--charms/trusty/neutron-contrail/icon.svg309
-rw-r--r--charms/trusty/neutron-contrail/metadata.yaml39
-rwxr-xr-xcharms/trusty/neutron-contrail/scripts/create-vrouter.sh133
-rw-r--r--charms/trusty/neutron-contrail/scripts/interfaces7
-rw-r--r--charms/trusty/neutron-contrail/scripts/juju-header5
-rwxr-xr-xcharms/trusty/neutron-contrail/scripts/remove-juju-bridge.sh17
-rwxr-xr-xcharms/trusty/neutron-contrail/scripts/vhost-phys.sh6
-rw-r--r--charms/trusty/neutron-contrail/scripts/vrouter-interfaces.awk38
-rw-r--r--charms/trusty/neutron-contrail/templates/contrail-barbican-auth.conf13
-rw-r--r--charms/trusty/neutron-contrail/templates/contrail-vrouter-agent.conf44
-rw-r--r--charms/trusty/neutron-contrail/templates/contrail-vrouter-nodemgr.conf9
-rw-r--r--charms/trusty/neutron-contrail/templates/vnc_api_lib.ini16
-rw-r--r--charms/trusty/neutron-contrail/templates/vrouter-vgw.cfg21
-rw-r--r--charms/trusty/zookeeper/.bzr/README3
-rw-r--r--charms/trusty/zookeeper/.bzr/branch-format1
-rw-r--r--charms/trusty/zookeeper/.bzr/branch/branch.conf1
-rw-r--r--charms/trusty/zookeeper/.bzr/branch/format1
-rw-r--r--charms/trusty/zookeeper/.bzr/branch/last-revision1
-rw-r--r--charms/trusty/zookeeper/.bzr/branch/tags0
-rw-r--r--charms/trusty/zookeeper/.bzr/checkout/conflicts1
-rw-r--r--charms/trusty/zookeeper/.bzr/checkout/dirstatebin4224 -> 0 bytes
-rw-r--r--charms/trusty/zookeeper/.bzr/checkout/format1
-rw-r--r--charms/trusty/zookeeper/.bzr/checkout/views0
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/format1
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.cixbin766 -> 0 bytes
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.iixbin669 -> 0 bytes
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.rixbin677 -> 0 bytes
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.six5
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.tixbin1087 -> 0 bytes
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/pack-names7
-rw-r--r--charms/trusty/zookeeper/.bzr/repository/packs/3c2f9bccb787d7e4c58ceea02b4606c0.packbin14265 -> 0 bytes
-rw-r--r--charms/trusty/zookeeper/README.md10
-rw-r--r--charms/trusty/zookeeper/config.yaml39
-rw-r--r--charms/trusty/zookeeper/copyright17
l---------charms/trusty/zookeeper/hooks/install1
l---------charms/trusty/zookeeper/hooks/quorum-relation-changed1
l---------charms/trusty/zookeeper/hooks/quorum-relation-departed1
l---------charms/trusty/zookeeper/hooks/quorum-relation-joined1
l---------charms/trusty/zookeeper/hooks/start1
l---------charms/trusty/zookeeper/hooks/stop1
l---------charms/trusty/zookeeper/hooks/upgrade-charm1
-rwxr-xr-xcharms/trusty/zookeeper/hooks/zookeeper-common216
l---------charms/trusty/zookeeper/hooks/zookeeper-relation-joined1
-rw-r--r--charms/trusty/zookeeper/icon.svg292
-rw-r--r--charms/trusty/zookeeper/metadata.yaml19
-rw-r--r--charms/trusty/zookeeper/revision1
-rw-r--r--charms/trusty/zookeeper/tests/00-setup16
-rw-r--r--charms/trusty/zookeeper/tests/10-bundles-test.py33
-rw-r--r--charms/trusty/zookeeper/tests/bundles.yaml7
1106 files changed, 0 insertions, 123342 deletions
diff --git a/charms/trusty/cassandra/.bzr/README b/charms/trusty/cassandra/.bzr/README
deleted file mode 100644
index f82dc1c..0000000
--- a/charms/trusty/cassandra/.bzr/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a Bazaar control directory.
-Do not change any files in this directory.
-See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/cassandra/.bzr/branch-format b/charms/trusty/cassandra/.bzr/branch-format
deleted file mode 100644
index 9eb09b7..0000000
--- a/charms/trusty/cassandra/.bzr/branch-format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/cassandra/.bzr/branch/branch.conf b/charms/trusty/cassandra/.bzr/branch/branch.conf
deleted file mode 100644
index efb4cac..0000000
--- a/charms/trusty/cassandra/.bzr/branch/branch.conf
+++ /dev/null
@@ -1 +0,0 @@
-parent_location = bzr+ssh://bazaar.launchpad.net/~charmers/charms/trusty/cassandra/trunk/
diff --git a/charms/trusty/cassandra/.bzr/branch/format b/charms/trusty/cassandra/.bzr/branch/format
deleted file mode 100644
index dc392f4..0000000
--- a/charms/trusty/cassandra/.bzr/branch/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Branch Format 7 (needs bzr 1.6)
diff --git a/charms/trusty/cassandra/.bzr/branch/last-revision b/charms/trusty/cassandra/.bzr/branch/last-revision
deleted file mode 100644
index 4f71a92..0000000
--- a/charms/trusty/cassandra/.bzr/branch/last-revision
+++ /dev/null
@@ -1 +0,0 @@
-379 stuart.bishop@canonical.com-20160701064342-way4zlx1v8mg8902
diff --git a/charms/trusty/cassandra/.bzr/branch/tags b/charms/trusty/cassandra/.bzr/branch/tags
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/cassandra/.bzr/branch/tags
+++ /dev/null
diff --git a/charms/trusty/cassandra/.bzr/checkout/conflicts b/charms/trusty/cassandra/.bzr/checkout/conflicts
deleted file mode 100644
index 0dc2d3a..0000000
--- a/charms/trusty/cassandra/.bzr/checkout/conflicts
+++ /dev/null
@@ -1 +0,0 @@
-BZR conflict list format 1
diff --git a/charms/trusty/cassandra/.bzr/checkout/dirstate b/charms/trusty/cassandra/.bzr/checkout/dirstate
deleted file mode 100644
index 61566d1..0000000
--- a/charms/trusty/cassandra/.bzr/checkout/dirstate
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/checkout/format b/charms/trusty/cassandra/.bzr/checkout/format
deleted file mode 100644
index e0261c7..0000000
--- a/charms/trusty/cassandra/.bzr/checkout/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/cassandra/.bzr/checkout/views b/charms/trusty/cassandra/.bzr/checkout/views
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/cassandra/.bzr/checkout/views
+++ /dev/null
diff --git a/charms/trusty/cassandra/.bzr/repository/format b/charms/trusty/cassandra/.bzr/repository/format
deleted file mode 100644
index b200528..0000000
--- a/charms/trusty/cassandra/.bzr/repository/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar repository format 2a (needs bzr 1.16 or later)
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cix
deleted file mode 100644
index 62a8e1e..0000000
--- a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.cix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iix
deleted file mode 100644
index adccde0..0000000
--- a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.iix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rix
deleted file mode 100644
index 3a4e8f1..0000000
--- a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.rix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.six b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.six
deleted file mode 100644
index d6e0f15..0000000
--- a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.six
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tix b/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tix
deleted file mode 100644
index ba00536..0000000
--- a/charms/trusty/cassandra/.bzr/repository/indices/b2ff4c83d0b0f30e7527867288318107.tix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/pack-names b/charms/trusty/cassandra/.bzr/repository/pack-names
deleted file mode 100644
index f3fd19b..0000000
--- a/charms/trusty/cassandra/.bzr/repository/pack-names
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.pack b/charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.pack
deleted file mode 100644
index c9f37e0..0000000
--- a/charms/trusty/cassandra/.bzr/repository/packs/b2ff4c83d0b0f30e7527867288318107.pack
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/cassandra/.bzrignore b/charms/trusty/cassandra/.bzrignore
deleted file mode 100644
index 17b1268..0000000
--- a/charms/trusty/cassandra/.bzrignore
+++ /dev/null
@@ -1,9 +0,0 @@
-revision
-*.pyc
-.stamp-*
-.venv2
-.venv3
-tests/.venv2
-tests/.venv3
-.coverage
-coverage
diff --git a/charms/trusty/cassandra/Makefile b/charms/trusty/cassandra/Makefile
deleted file mode 100644
index 571af56..0000000
--- a/charms/trusty/cassandra/Makefile
+++ /dev/null
@@ -1,224 +0,0 @@
-#!/usr/bin/make -f
-
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-JUJU = juju
-
-default:
- @echo Missing target
- @echo 'Usage: make [ lint | unittest | test | clean | sync ]'
- env
-
-# Only trusty supported, but xenial expected soon.
-SERIES := $(shell $(JUJU) get-environment default-series)
-
-HOST_SERIES := $(shell lsb_release -sc)
-ifeq ($(HOST_SERIES),trusty)
- PYVER := 3.4
-else
- PYVER := 3.5
-endif
-
-
-# /!\ Ensure that errors early in pipes cause failures, rather than
-# overridden by the last stage of the pipe. cf. 'test.py | ts'
-SHELL := /bin/bash
-export SHELLOPTS:=errexit:pipefail
-
-
-# Calculate the CHARM_DIR (the directory this Makefile is in)
-THIS_MAKEFILE_PATH:=$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))
-CHARM_DIR:=$(shell cd $(dir $(THIS_MAKEFILE_PATH));pwd)
-VENV3:=$(CHARM_DIR)/.venv3
-
-# Set the PATH so the correct tools are found.
-export PATH:=$(VENV3)/bin:$(PATH)
-
-SITE_PACKAGES=$(wildcard $(VENV3)/lib/python*/site-packages)
-
-PIP=.venv3/bin/pip$(PYVER) -q
-NOSETESTS=.venv3/bin/nosetests-3.4 -sv # Yes, even with 3.5
-
-# Set pipefail so we can get sane error codes while tagging test output
-# with ts(1)
-SHELL=bash -o pipefail
-
-deps: packages venv3
-
-lint: deps
- date
- free --human
- charm proof $(CHARM_DIR)
- flake8 \
- --ignore=E402,E265 \
- --exclude=charmhelpers,.venv2,.venv3 hooks tests testing
- @echo OK: Lint free `date`
-
-unittest: lint
- $(NOSETESTS) \
- tests.test_actions --cover-package=actions \
- tests.test_helpers --cover-package=helpers \
- tests.test_definitions --cover-package=definitions \
- --with-coverage --cover-branches
- @echo OK: Unit tests pass `date`
-
-test: unittest
- AMULET_TIMEOUT=3600 \
- $(NOSETESTS) tests.test_integration
-
-ftest: unittest Test1UnitDeployment
-Test1UnitDeployment: deps
- date
- AMULET_TIMEOUT=5400 \
- $(NOSETESTS) tests.test_integration:Test1UnitDeployment 2>&1 | ts
-
-20test: unittest Test20Deployment
-Test20Deployment: deps
- date
- AMULET_TIMEOUT=5400 \
- $(NOSETESTS) tests.test_integration:Test20Deployment 2>&1 | ts
-
-21test: unittest Test21Deployment
-Test21Deployment: deps
- date
- AMULET_TIMEOUT=5400 \
- $(NOSETESTS) tests.test_integration:Test21Deployment 2>&1 | ts
-
-30test: unittest Test30Deployment
-Test30Deployment: deps
- date
- AMULET_TIMEOUT=5400 \
- $(NOSETESTS) tests.test_integration:Test30Deployment 2>&1 | ts
-
-3test: unittest Test3UnitDeployment
-Test3UnitDeployment: deps
- date
- AMULET_TIMEOUT=7200 \
- $(NOSETESTS) tests.test_integration:Test3UnitDeployment 2>&1 | ts
-
-authtest: unittest TestAllowAllAuthenticatorDeployment
-TestAllowAllAuthenticatorDeployment: deps
- date
- AMULET_TIMEOUT=7200 \
- $(NOSETESTS) \
- tests.test_integration:TestAllowAllAuthenticatorDeployment 2>&1 | ts
-
-# Place a copy of the Oracle Java SE 7 Server Runtime tarball in ./lib
-# to run these tests.
-jretest: unittest
- AMULET_TIMEOUT=5400 \
- $(NOSETESTS) tests.test_integration:TestOracleJREDeployment 2>&1 | ts
-
-# You need the Oracle JRE (per jretest) and set the DSE_SOURCE environment
-# variable for this to work:
-# DSE_SOURCE="deb http://un:pw@debian.datastax.com/enterprise stable main"
-# You will also need a cache like squid-deb-proxy and have tweaked it to
-# cache the authenticated files, or the tests will likely timeout waiting
-# for huge downloads to complete. Alternatively, mirror the DataStax
-# packages into your own private archive.
-dsetest: unittest
- AMULET_TIMEOUT=5400 \
- $(NOSETESTS) tests.test_integration:TestDSEDeployment 2>&1 | ts
-
-coverage: lint
- $(NOSETESTS) \
- tests.test_actions --cover-package=actions \
- tests.test_helpers --cover-package=helpers \
- tests.test_definitions --cover-package=definitions \
- --with-coverage --cover-branches \
- --cover-html --cover-html-dir=coverage \
- --cover-min-percentage=100 || \
- (gnome-open coverage/index.html; false)
-
-clean:
- rm -rf .venv? tests/.venv? .stamp-* coverage .coverage
- find . -name __pycache__ -type d | xargs rm -rf
-
-
-# Attempt to diagnose environment for test failures.
-debug:
- -which virtualenv
- -which python
- -which python2
- -which python3
- -which pip
- -which pip3
- -head -1 `which virtualenv || echo nothere`
- -python -c 'import sys; print(sys.version); print(sys.path);'
- -python2 -c 'import sys; print(sys.version); print(sys.path);'
- -python3 -c 'import sys; print(sys.version); print(sys.path);'
- -env
-
-
-packages: .stamp-packages
-.stamp-packages:
- # Install bootstrap debs, and Python packages used by the charm
- # to ensure versions match.
- sudo add-apt-repository -y ppa:stub/juju
- sudo add-apt-repository -y ppa:stub/cassandra
- sudo apt-get update
- sudo apt-get install -y \
- python3 python3-pip python3-apt python3-dev python-virtualenv \
- charm-tools build-essential libev4 libev-dev libffi-dev \
- netcat python3-jinja2 juju-wait moreutils \
- python3-cassandra python3-bcrypt
- touch .stamp-packages
-
-venv3: packages .stamp-venv3
-.stamp-venv3:
- # Build a Python virtualenv to run our tests.
- virtualenv -p python3 --system-site-packages ${VENV3}
-
- # Create a .pth so our tests can locate everything without
- # sys.path hacks.
- (echo ${CHARM_DIR}/hooks; echo ${CHARM_DIR}) \
- > ${VENV3}/lib/python${PYVER}/site-packages/tests.pth
-
- echo 'pip: ' `which pip`
-
- # Pip install packages needed by the test suite but not used
- # by the charm.
- $(PIP) install bcrypt cassandra-driver blist
- $(PIP) install --upgrade -I nose flake8
- $(PIP) install --upgrade coverage amulet mock juju-deployer juju-wait
-
- echo 'nosetests:' `which nosetests`
- echo 'flake8:' `which flake8`
-
- # Create a link for test shebang lines.
- (cd tests && ln -s ${VENV3} .venv3)
-
- touch .stamp-venv3
-
-venv2: packages .stamp-venv2
-.stamp-venv2:
- virtualenv -p python2.7 --system-site-packages .venv2
- .venv2/bin/pip install -q bundletester
- touch .stamp-venv2
-
-bundletest: venv2
- .venv2/bin/bundletester
-
-sync:
- @bzr cat \
- lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > .charm_helpers_sync.py
- @python .charm_helpers_sync.py -c charm-helpers.yaml
- #@python .charm_helpers_sync.py \
- # -c lib/testcharms/testclient/charm-helpers.yaml
- @rm .charm_helpers_sync.py
diff --git a/charms/trusty/cassandra/README.md b/charms/trusty/cassandra/README.md
deleted file mode 100644
index 00784cc..0000000
--- a/charms/trusty/cassandra/README.md
+++ /dev/null
@@ -1,179 +0,0 @@
-# Overview
-
-The Apache Cassandra database is the right choice when you need scalability
-and high availability without compromising performance. Linear scalability
-and proven fault-tolerance on commodity hardware or cloud infrastructure
-make it the perfect platform for mission-critical data. Cassandra's support
-for replicating across multiple datacenters is best-in-class, providing lower
-latency for your users and the peace of mind of knowing that you can survive
-regional outages.
-
-See [cassandra.apache.org](http://cassandra.apache.org) for more information.
-
-
-# Editions
-
-This charm supports Apache Cassandra 2.0, 2.1, 2.2 & 3.0, and
-Datastax Enterprise 4.7 & 4.8. The default is Apache Cassandra 2.2.
-
-To use Apache Cassandra 2.0, specify the Apache Cassandra 2.0 archive source
-in the `install_sources` config setting when deploying.
-
-To use Datastax Enterprise, set the `edition` config setting to `dse`
-and the Datastax Enterprise archive URL in `install_sources` (including your
-username and password).
-
-
-# Deployment
-
-Cassandra deployments are relatively simple in that they consist of a set of
-Cassandra nodes which seed from each other to create a ring of servers:
-
- juju deploy -n3 cs:trusty/cassandra
-
-The service units will deploy and will form a single ring.
-
-New nodes can be added to scale up:
-
- juju add-unit cassandra
-
-
-/!\ Nodes must be manually decommissioned before dropping a unit.
-
- juju run --unit cassandra/1 "nodetool decommission"
- # Wait until Mode is DECOMMISSIONED
- juju run --unit cassandra/1 "nodetool netstats"
- juju remove-unit cassandra/1
-
-It is recommended to deploy at least 3 nodes and configure all your
-keyspaces to have a replication factor of three. Using fewer nodes or
-neglecting to set your keyspaces' replication settings means that your
-data is at risk and availability lower, as a failed unit may take the
-only copy of data with it.
-
-Production systems will normally want to set `max_heap_size` and
-`heap_newsize` to the empty string, to enable automatic memory size
-tuning. The defaults have been chosen to be suitable for development
-environments but will perform poorly with real workloads.
-
-
-## Planning
-
-- Do not attempt to store too much data per node. If you need more space,
- add more nodes. Most workloads work best with a capacity under 1TB
- per node.
-
-- You need to keep 50% of your disk space free for Cassandra maintenance
- operations. If you expect your nodes to hold 500GB of data each, you
- will need a 1TB partition. Using non-default compaction such as
- LeveledCompactionStrategy can lower this waste.
-
-- Much more information can be found in the [Cassandra 2.2 documentation](http://docs.datastax.com/en/cassandra/2.2/cassandra/planning/planPlanningAbout.html)
-
-
-## Network Access
-
-The default Cassandra packages are installed from the apache.org
-archive. To avoid this download, place a copy of the packages in a local
-archive and specify its location in the `install_sources` configuration
-option. The signing key is automatically added.
-
-When using DataStax Enterprise, you need to specify the archive location
-containing the DataStax Enterprise .deb packages in the
-`install_sources` configuration item, and the signing key in the
-`install_keys` configuration item. Place the DataStax packages in a
-local archive to avoid downloading from datastax.com.
-
-
-## Oracle Java SE
-
-Cassandra recommends using Oracle Java SE 8. Unfortunately, this
-software is accessible only after accepting Oracle's click-through
-license making deployments using it much more cumbersome. You will need
-to download the Oracle Java SE 8 Server Runtime for Linux, and place the
-tarball at a URL accessible to your deployed units. The config item
-`private_jre_url` needs to be set to this URL.
-
-
-# Usage
-
-To relate the Cassandra charm to a service that understands how to talk to
-Cassandra using Thrift or the native Cassandra protocol::
-
- juju deploy cs:service-that-needs-cassandra
- juju add-relation service-that-needs-cassandra cassandra:database
-
-
-Alternatively, if you require a superuser connection, use the
-`database-admin` relation instead of `database`::
-
- juju deploy cs:admin-service
- juju add-relation admin-service cassandra:database-admin
-
-
-Client charms need to provide nothing. The Cassandra service publishes the
-following connection settings and cluster information on the client's relation:
-
-`username` and `password`:
-
- Authentication credentials. The cluster is configured to use
- the standard PasswordAuthenticator authentication provider, rather
- than the insecure default. You can use different credentials
- if you wish, using an account created through some other mechanism.
-
-`host`:
-
- IP address to connect to.
-
-`native_transport_port`:
-
- Port for drivers and tools using the newer native protocol.
-
-`rpc_port`:
-
- Port for drivers and tools using the legacy Thrift protocol.
-
-`cluster_name`:
-
- The cluster name. A client service may be related to several
- Cassandra services, and this setting may be used to tell which
- services belong to which cluster.
-
-`datacenter` and `rack`:
-
- The datacenter and rack units in this service belong to. Required for
- setting keyspace replication correctly.
-
-The cluster is configured to use the recommended 'snitch'
-(GossipingPropertyFileSnitch), so you will need to configure replication of
-your keyspaces using the NetworkTopologyStrategy replica placement strategy.
-For example, using the default datacenter named 'juju':
-
- CREATE KEYSPACE IF NOT EXISTS mydata WITH REPLICATION =
- { 'class': 'NetworkTopologyStrategy', 'juju': 3};
-
-
-Although authentication is configured using the standard
-PasswordAuthentication, by default no authorization is configured
-and the provided credentials will have access to all data on the cluster.
-For more granular permissions, you will need to set the authorizer
-in the service configuration to CassandraAuthorizer and manually grant
-permissions to the users.
-
-
-# Known Limitations and Issues
-
-This is the 'trusty' charm. Upgrade from the 'precise' charm is not supported.
-
-The `system_auth` keyspace replication factor is automatically increased
-but not decreased. If you have a service with three or more units and
-decommission enough nodes to drop below three, you will need to manually
-update the `system_auth` keyspace replication settings.
-
-
-# Contact Information
-
-## Cassandra
-
-- [Apache Cassandra homepage](http://cassandra.apache.org/)
-- [Cassandra Getting Started](http://wiki.apache.org/cassandra/GettingStarted)
diff --git a/charms/trusty/cassandra/README.storage b/charms/trusty/cassandra/README.storage
deleted file mode 100644
index 4a71179..0000000
--- a/charms/trusty/cassandra/README.storage
+++ /dev/null
@@ -1,41 +0,0 @@
-= Persistent storage =
-
-/!\ Unfortunately, per Bug #1334956, the storage and block-storage-broker
- charms are not yet available in the charm store for trusty so
- this documentation does not work as written.
-
-The cassandra charm takes advantage of the storage subordinate charm
-and the block-storage-broker charm. With these two charms cassandra can
-either request new volumes are created or use existing volumes.
-
-For requesting new volume creation, just set external_volume_mount in
-the cassandra charm and root, provider and volume_size for the storage
-subordinate charm.
-
-If using existing volumes, also set the volume_map for the storage
-subordinate charm.
-
-
-Example using existing volumes:
-
-juju deploy -n 3 cassandra
-juju set cassandra external_volume_mount="/srv/data"
-
-juju deploy storage
-juju set storage
- root=/srv/data
- provider=block-storage-broker
- volume_size=10
- # Volume map needed for existing storage
- volume_map="{cassandra/0: e09be717-384b-43e3-b06a-3a68b5a2252d,
- cassandra/1: ebd35228-6972-4e22-86a8-37483581154a,
- cassandra/2: 9f02b67b-72da-4e22-98ee-10a95c1b298d}"
-
-juju deploy block-storage-broker
-
-juju add-relation storage cassandra
-juju add-relation storage block-storage-broker
-
-
-See the READMEs for the storage subordinate charm and the
-block-storage-broker charm for more detail on persistent storage.
diff --git a/charms/trusty/cassandra/charm-helpers.yaml b/charms/trusty/cassandra/charm-helpers.yaml
deleted file mode 100644
index da3786b..0000000
--- a/charms/trusty/cassandra/charm-helpers.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-destination: hooks/charmhelpers
-#branch: lp:charm-helpers
-branch: lp:~stub/charm-helpers/integration
-include:
- - coordinator
- - core
- - fetch
- - contrib.charmsupport
- - contrib.templating.jinja
- - contrib.network.ufw
- - contrib.benchmark
diff --git a/charms/trusty/cassandra/config.yaml b/charms/trusty/cassandra/config.yaml
deleted file mode 100644
index 6ec68a5..0000000
--- a/charms/trusty/cassandra/config.yaml
+++ /dev/null
@@ -1,316 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-options:
- # Install and upgrade charm related options
- extra_packages:
- type: string
- default: ""
- description: >
- Extra packages to install. A space delimited list of packages.
- package_status:
- default: "install"
- type: string
- description: >
- The status of service-affecting packages will be set to this
- value in the dpkg database. Useful valid values are "install"
- and "hold".
- install_sources:
- type: string
- description: >
- charm-helpers standard listing of package install sources.
- If you are using Datastax Enterprise, you will need to
- override one defaults with your own username and password.
- default: |
- - deb http://www.apache.org/dist/cassandra/debian 22x main
- - ppa:openjdk-r/ppa # For OpenJDK 8
- - ppa:stub/cassandra # For Python driver
- # - deb http://debian.datastax.com/community stable main
- # DSE requires you to register and add your username/password here.
- # - deb http://un:pw@debian.datastax.com/enterprise stable main
- install_keys:
- type: string
- description: >
- charm-helpers standard listing of package install source
- signing keys, corresponding to install_sources.
- default: |
- - null # ppa:stub/cassandra signing key added automatically.
- - null # Apache package signing key added automatically.
- - null # PPA package signing key added automatically.
- # - null # DataStack package signing key added automatically.
- http_proxy:
- type: string
- description: >
- Value for the http_proxy and https_proxy environment variables.
- This causes pip(1) and other tools to perform downloads via
- the proxy server. eg. http://squid.dc1.lan:8080
- default: ""
- # TODO: Add JNA
- # private_jna_url:
- # type: string
- # default: ""
- # description: >
- # URL for the private jna tar file. DSE requires JNA >= 3.4
- private_jre_url:
- type: string
- default: ""
- description: >
- URL for the private jre tar file. DSE requires
- Oracle Java SE 8 Server JRE (eg. server-jre-8u60-linux-x64.tar.gz).
- edition:
- type: string
- default: community
- description: >
- One of 'community' or 'dse'. 'community' uses the
- Apache Cassandra packages. 'dse' is for DataStax
- Enterprise. Selecting 'dse' overrides the jvm setting.
- jre:
- type: string
- default: openjdk
- description: >
- Which Java runtime environment to use. May be 'openjdk' or
- 'oracle'.
-
- # Storage configuration
- wait_for_storage_broker:
- type: boolean
- default: False
- description: >
- Do not start the service before external storage has been
- mounted using the block storage broker relation. If you do
- not set this and you relate the service to the storage
- broker, then your service will have started up using local
- disk, and later torn down and rebuilt when the external
- storage became available.
- data_file_directories:
- type: string
- default: data
- description: >
- Space delimited data directories. Use multiple data
- directories to split data over multiple physical hardware
- drive partitions. Paths are relative to /var/lib/cassandra
- or the block storage broker external mount point.
- commitlog_directory:
- type: string
- default: commitlog
- description: >
- Commit log directory. The path is relative to
- /var/lib/cassandra or the block storage broker external
- mount point.
- saved_caches_directory:
- type: string
- default: saved_caches
- description: >
- Saved caches directory. The path is relative to
- /var/lib/cassandra or the block storage broker external
- mount point.
- io_scheduler:
- type: string
- default: "noop"
- description: >
- Set kernel io scheduler for persistent storage.
- https://www.kernel.org/doc/Documentation/block/switching-sched.txt
-
-## units-to-update:
-## type: string
-## default: "all"
-## description: >
-## Comma separated list of unit numbers to update (i.e. modify
-## /etc setup and trigger cassandra restart on config-change or
-## upgrade-charm), or "all".
-
- # nrpe-external-master relation related options
- nagios_context:
- default: "juju"
- type: string
- description: |
- Used by the nrpe subordinate charms.
- A string that will be prepended to instance name to set the host name
- in nagios. So for instance the hostname would be something like:
- juju-myservice-0
- If you're running multiple environments with the same services in them
- this allows you to differentiate between them.
- nagios_servicegroups:
- default: ""
- type: string
- description: >
- A comma-separated list of nagios servicegroups.
- If left empty, the nagios_context will be used as the servicegroup
- nagios_heapchk_warn_pct:
- default: 80
- type: int
- description: >
- The pct of heap used to trigger a nagios warning
- nagios_heapchk_crit_pct:
- default: 90
- type: int
- description: >
- The pct of heap used to trigger a nagios critcal alert
- nagios_disk_warn_pct:
- default: 50
- type: int
- description: >
- The pct of data disk used to trigger a nagios warning
- nagios_disk_crit_pct:
- default: 25
- type: int
- description: >
- The pct of data disk used to trigger a nagios critcal alert
-
- # cassandra-env.sh related options
- # The default tuning has been chosen to be suitable for development
- # and test environments. The default tunings are expected to change
- # over time.
- max_heap_size:
- type: string
- default: '384M'
- description: >
- Total size of Java memory heap, for example 1G or 512M.
- If you set this, you should also set heap_newsize. The
- default is automatically tuned.
- heap_newsize:
- type: string
- default: '32M'
- description: >
- The size of the JVM's young generation in the heap. If you
- set this, you should also set max_heap_size. If in doubt,
- go with 100M per physical CPU core. The default is
- automatically tuned.
-
- # Cassandra.yaml related options
- cluster_name:
- type: string
- default: "juju"
- description: >
- Name of the Cassandra cluster. This is mainly used to
- prevent machines in one logical cluster from joining
- another. All Cassandra services you wish to cluster together
- must have the same cluster_name. This setting cannot be changed
- after service deployment.
- partitioner:
- default: Murmur3Partitioner
- type: string
- description: >
- The cassandra partitioner to use. Use Murmur3Partitioner,
- unless another is required for backwards compatibility.
- num_tokens:
- type: int
- default: 256
- description: Number of tokens per node.
-
- # Topology of the service in the cluster.
- datacenter:
- type: string
- default: "juju"
- description: >
- The node's datacenter used by the endpoint_snitch. e.g. "DC1".
- It cannot be changed after service deployment.
- rack:
- type: string
- default: ""
- description: >
- The rack used by the endpoint_snitch for all units in this
- service. e.g. "Rack1". This cannot be changed after deployment.
- It defaults to the service name. Cassandra will store replicated
- data in different racks whenever possible.
-
-
- # Networking options.
- native_transport_port:
- type: int
- default: 9042
- description: Native protocol port for native protocol clients.
- rpc_port:
- type: int
- default: 9160
- description: Thrift protocol port for legacy clients.
- storage_port:
- type: int
- default: 7000
- description: Cluster communication port
- ssl_storage_port:
- type: int
- default: 7001
- description: >
- Cluster secure communication port. TODO: Unused. configure SSL.
- authenticator:
- type: string
- default: PasswordAuthenticator
- description: >
- Authentication backend. Only PasswordAuthenticator and
- AllowAllAuthenticator are supported. You should only
- use AllowAllAuthenticator for legacy applications that
- cannot provide authentication credentials.
- authorizer:
- type: string
- default: AllowAllAuthorizer
- description: >
- Authorization backend, implementing IAuthorizer; used to limit
- access/provide permissions Out of the box, Cassandra provides
- AllowAllAuthorizer & CassandraAuthorizer
- - AllowAllAuthorizer allows any action to any user - set it to
- disable authorization.
- - CassandraAuthorizer stores permissions in
- system_auth.permissions table.
-
-
- # Tuning options.
- compaction_throughput_mb_per_sec:
- type: int
- default: 16
- description: >
- Throttles compaction to the given total throughput (in MB/sec)
- across the entire system. The faster you insert data, the faster
- you need to compact in order to keep the sstable count down,
- but in general, setting this to 16 to 32 times the rate you
- are inserting data is more than sufficient. Setting this to
- 0 disables throttling. Note that this account for all types
- of compaction, including validation compaction.
- stream_throughput_outbound_megabits_per_sec:
- type: int
- default: 200
- description: >
- Throttles all outbound streaming file transfers on nodes to the
- given total throughput in Mbps. This is necessary because Cassandra
- does mostly sequential IO when streaming data during bootstrap or
- repair, which can lead to saturating the network connection and
- degrading rpc performance. When unset, the default is 200 Mbps
- or 25 MB/s. 0 to disable throttling.
-
- tombstone_warn_threshold:
- type: int
- default: 1000
- description: >
- When executing a scan, within or across a partition, we
- need to keep the tombstones seen in memory so we can return
- them to the coordinator, which will use them to make sure
- other replicas also know about the deleted rows. With
- workloads that generate a lot of tombstones, this can cause
- performance problems and even exaust the server heap. Adjust
- the thresholds here if you understand the dangers and want
- to scan more tombstones anyway.
- tombstone_failure_threshold:
- type: int
- default: 100000
- description: >
- When executing a scan, within or across a partition, we need
- to keep the tombstones seen in memory so we can return them
- to the coordinator, which will use them to make sure other
- replicas also know about the deleted rows. With workloads
- that generate a lot of tombstones, this can cause
- performance problems and even exaust the server heap. Adjust
- the thresholds here if you understand the dangers and want
- to scan more tombstones anyway.
diff --git a/charms/trusty/cassandra/copyright b/charms/trusty/cassandra/copyright
deleted file mode 100644
index 7902f54..0000000
--- a/charms/trusty/cassandra/copyright
+++ /dev/null
@@ -1,19 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2013, Canonical Ltd., All Rights Reserved.
-License: AGPL-3
-This file is part of the Cassandra charm.
-Copyright 2015 Canonical Ltd.
-.
-This program is free software: you can redistribute it and/or modify it
-under the terms of the GNU General Public License version 3, as
-published by the Free Software Foundation.
-.
-This program is distributed in the hope that it will be useful, but
-WITHOUT ANY WARRANTY; without even the implied warranties of
-MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-PURPOSE. See the GNU General Public License for more details.
-.
-You should have received a copy of the GNU General Public License along
-with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/files/check_cassandra_heap.sh b/charms/trusty/cassandra/files/check_cassandra_heap.sh
deleted file mode 100644
index 395aa06..0000000
--- a/charms/trusty/cassandra/files/check_cassandra_heap.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-
-# Copyright (C) 2012 Canonical Ltd.
-# Author: Liam Young
-#
-# Script used to check Cassandra is alive and that it has space left in the heap
-
-set -u
-
-if [[ $# -lt 3 ]]; then
- echo "$0 <jmx-ipadress> <warnpct> <criticalpct>"
- exit 1
-fi
-WARN_PCT=$2
-CRIT_PCT=$3
-
-NODE_INF0="$(nodetool -h $1 info 2>/dev/null)"
-if [[ $? -ne 0 ]]; then
- echo "ERROR: Failed to connect to Cassandra"
- exit 2
-fi
-PCT_USED=$(echo "$NODE_INF0" | awk 'BEGIN {FS=":"} $1 ~ /^Heap Memory/ {print $2}' | awk '{ printf("%i\n", $1*100/$3) }')
-USAGE_INFO="${PCT_USED}% of heap memory used"
-if [[ $PCT_USED -lt $WARN_PCT ]]; then
- echo "OK: ${USAGE_INFO}"
- exit 0
-elif [[ $PCT_USED -lt $CRIT_PCT ]]; then
- echo "WARNING: ${USAGE_INFO}"
- exit 1
-else
- echo "CRITICAL: ${USAGE_INFO}"
- exit 1
-fi
diff --git a/charms/trusty/cassandra/hooks/actions.py b/charms/trusty/cassandra/hooks/actions.py
deleted file mode 100644
index 8887056..0000000
--- a/charms/trusty/cassandra/hooks/actions.py
+++ /dev/null
@@ -1,990 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from contextlib import closing
-import errno
-from functools import wraps
-import glob
-import os.path
-import re
-import shlex
-import socket
-import subprocess
-from textwrap import dedent
-import time
-import urllib.request
-
-from charmhelpers import fetch
-from charmhelpers.contrib.charmsupport import nrpe
-from charmhelpers.contrib.network import ufw
-from charmhelpers.contrib.templating import jinja
-from charmhelpers.core import hookenv, host
-from charmhelpers.core.fstab import Fstab
-from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING
-
-import cassandra
-
-from coordinator import coordinator
-import helpers
-import relations
-
-
-# These config keys cannot be changed after service deployment.
-UNCHANGEABLE_KEYS = set(['cluster_name', 'datacenter', 'rack', 'edition'])
-
-# If any of these config items are changed, Cassandra needs to be
-# restarted and maybe remounted.
-RESTART_REQUIRED_KEYS = set([
- 'data_file_directories',
- 'commitlog_directory',
- 'saved_caches_directory',
- 'storage_port',
- 'ssl_storage_port',
- 'rpc_port',
- 'native_transport_port',
- 'partitioner',
- 'num_tokens',
- 'max_heap_size',
- 'heap_newsize',
- 'authenticator',
- 'authorizer',
- 'compaction_throughput_mb_per_sec',
- 'stream_throughput_outbound_megabits_per_sec',
- 'tombstone_warn_threshold',
- 'tombstone_failure_threshold',
- 'jre',
- 'private_jre_url'])
-
-ALL_CONFIG_KEYS = UNCHANGEABLE_KEYS.union(RESTART_REQUIRED_KEYS)
-
-
-# All other config items. By maintaining both lists, we can detect if
-# someone forgot to update these lists when they added a new config item.
-RESTART_NOT_REQUIRED_KEYS = set([
- 'extra_packages',
- 'package_status',
- 'install_sources',
- 'install_keys',
- 'http_proxy',
- 'wait_for_storage_broker',
- 'io_scheduler',
- 'nagios_context',
- 'nagios_servicegroups',
- 'nagios_heapchk_warn_pct',
- 'nagios_heapchk_crit_pct',
- 'nagios_disk_warn_pct',
- 'nagios_disk_crit_pct'])
-
-
-def action(func):
- '''Log and call func, stripping the undesirable servicename argument.
- '''
- @wraps(func)
- def wrapper(servicename, *args, **kw):
- if hookenv.remote_unit():
- hookenv.log("** Action {}/{} ({})".format(hookenv.hook_name(),
- func.__name__,
- hookenv.remote_unit()))
- else:
- hookenv.log("** Action {}/{}".format(hookenv.hook_name(),
- func.__name__))
- return func(*args, **kw)
- return wrapper
-
-
-def leader_only(func):
- '''Decorated function is only run on the leader.'''
- @wraps(func)
- def wrapper(*args, **kw):
- if hookenv.is_leader():
- return func(*args, **kw)
- else:
- return None
- return wrapper
-
-
-def authentication(func):
- '''Decorated function is skipped if authentication is disabled.'''
- @wraps(func)
- def wrapper(*args, **kw):
- auth = hookenv.config()['authenticator']
- if auth == 'PasswordAuthenticator':
- return func(*args, **kw)
- elif auth == 'AllowAllAuthenticator':
- hookenv.log('Skipped. Authentication disabled.', DEBUG)
- return None
- helpers.status_set('blocked', 'Unknown authenticator {}'.format(auth))
- raise SystemExit(0)
- return wrapper
-
-
-@action
-def set_proxy():
- import hooks
- hooks.set_proxy()
-
-
-@action
-def revert_unchangeable_config():
- config = hookenv.config()
-
- # config.previous() only becomes meaningful after the install
- # hook has run. During the first run on the unit hook, it
- # reports everything has having None as the previous value.
- if config._prev_dict is None:
- return
-
- for key in UNCHANGEABLE_KEYS:
- if config.changed(key):
- previous = config.previous(key)
- hookenv.log('{} cannot be changed after service deployment. '
- 'Using original setting {!r}'.format(key, previous),
- ERROR)
- config[key] = previous
-
-
-# FOR CHARMHELPERS
-@action
-def preinstall():
- '''Preinstallation data_ready hook.'''
- # Only run the preinstall hooks from the actual install hook.
- if hookenv.hook_name() == 'install':
- # Pre-exec
- pattern = os.path.join(hookenv.charm_dir(),
- 'exec.d', '*', 'charm-pre-install')
- for f in sorted(glob.glob(pattern)):
- if os.path.isfile(f) and os.access(f, os.X_OK):
- hookenv.log('Running preinstall hook {}'.format(f))
- subprocess.check_call(['sh', '-c', f])
- else:
- hookenv.log('Ingnoring preinstall hook {}'.format(f),
- WARNING)
- else:
- hookenv.log('No preinstall hooks found')
-
-
-# FOR CHARMHELPERS
-@action
-def swapoff(fstab='/etc/fstab'):
- '''Turn off swapping in the container, permanently.'''
- # Turn off swap in the current session
- if helpers.is_lxc():
- hookenv.log("In an LXC. Not touching swap.")
- return
- else:
- try:
- subprocess.check_call(['swapoff', '-a'])
- except Exception as e:
- hookenv.log("Got an error trying to turn off swapping. {}. "
- "We may be in an LXC. Exiting gracefully"
- "".format(e), WARNING)
- return
-
- # Disable swap permanently
- with closing(Fstab(fstab)) as fstab:
- while True:
- swap_entry = fstab.get_entry_by_attr('filesystem', 'swap')
- if swap_entry is None:
- break
- fstab.remove_entry(swap_entry)
-
-
-# FOR CHARMHELPERS
-@action
-def configure_sources():
- '''Standard charmhelpers package source configuration.'''
- config = hookenv.config()
- if config.changed('install_sources') or config.changed('install_keys'):
- fetch.configure_sources(True)
-
-
-@action
-def add_implicit_package_signing_keys():
- # Rather than blindly add these keys, we should sniff
- # config['install_sources'] for apache.org or datastax.com urls and
- # add only the appropriate keys.
- for key in ('apache', 'datastax'):
- path = os.path.join(hookenv.charm_dir(), 'lib', '{}.key'.format(key))
- subprocess.check_call(['apt-key', 'add', path],
- stdin=subprocess.DEVNULL)
-
-
-@action
-def reset_sysctl():
- '''Configure sysctl settings for Cassandra'''
- if helpers.is_lxc():
- hookenv.log("In an LXC. Leaving sysctl unchanged.")
- else:
- cassandra_sysctl_file = os.path.join('/', 'etc', 'sysctl.d',
- '99-cassandra.conf')
- contents = b"vm.max_map_count = 131072\n"
- try:
- host.write_file(cassandra_sysctl_file, contents)
- subprocess.check_call(['sysctl', '-p', cassandra_sysctl_file])
- except OSError as e:
- if e.errno == errno.EACCES:
- hookenv.log("Got Permission Denied trying to set the "
- "sysctl settings at {}. We may be in an LXC. "
- "Exiting gracefully".format(cassandra_sysctl_file),
- WARNING)
- else:
- raise
-
-
-@action
-def reset_limits():
- '''Set /etc/security/limits.d correctly for Ubuntu, so the
- startup scripts don't emit a spurious warning.
-
- Per Cassandra documentation, Ubuntu needs some extra
- twiddling in /etc/security/limits.d. I have no idea why
- the packages don't do this, since they are already
- setting limits for the cassandra user correctly. The real
- bug is that the limits of the user running the startup script
- are being checked, rather than the limits of the user that will
- actually run the process.
- '''
- contents = dedent('''\
- # Maintained by Juju
- root - memlock unlimited
- root - nofile 100000
- root - nproc 32768
- root - as unlimited
- ubuntu - memlock unlimited
- ubuntu - nofile 100000
- ubuntu - nproc 32768
- ubuntu - as unlimited
- ''')
- host.write_file('/etc/security/limits.d/cassandra-charm.conf',
- contents.encode('US-ASCII'))
-
-
-@action
-def install_cassandra_packages():
- helpers.install_packages(helpers.get_cassandra_packages())
- if helpers.get_jre() != 'oracle':
- subprocess.check_call(['update-java-alternatives',
- '--jre-headless',
- '--set', 'java-1.8.0-openjdk-amd64'])
-
-
-@action
-def ensure_cassandra_package_status():
- helpers.ensure_package_status(helpers.get_cassandra_packages())
-
-
-def _fetch_oracle_jre():
- config = hookenv.config()
- url = config.get('private_jre_url', None)
- if url and config.get('retrieved_jre', None) != url:
- filename = os.path.join(hookenv.charm_dir(),
- 'lib', url.split('/')[-1])
- if not filename.endswith('-linux-x64.tar.gz'):
- helpers.status_set('blocked',
- 'Invalid private_jre_url {}'.format(url))
- raise SystemExit(0)
- helpers.status_set(hookenv.status_get()[0],
- 'Downloading Oracle JRE')
- hookenv.log('Oracle JRE URL is {}'.format(url))
- urllib.request.urlretrieve(url, filename)
- config['retrieved_jre'] = url
-
- pattern = os.path.join(hookenv.charm_dir(),
- 'lib', 'server-jre-?u*-linux-x64.tar.gz')
- tarballs = glob.glob(pattern)
- if not (url or tarballs):
- helpers.status_set('blocked',
- 'private_jre_url not set and no local tarballs.')
- raise SystemExit(0)
-
- elif not tarballs:
- helpers.status_set('blocked',
- 'Oracle JRE tarball not found ({})'.format(pattern))
- raise SystemExit(0)
-
- # Latest tarball by filename/version num. Lets hope they don't hit
- # 99 (currently at 76).
- tarball = sorted(tarballs)[-1]
- return tarball
-
-
-def _install_oracle_jre_tarball(tarball):
- # Same directory as webupd8 to avoid surprising people, but it could
- # be anything.
- if 'jre-7u' in str(tarball):
- dest = '/usr/lib/jvm/java-7-oracle'
- else:
- dest = '/usr/lib/jvm/java-8-oracle'
-
- if not os.path.isdir(dest):
- host.mkdir(dest)
-
- jre_exists = os.path.exists(os.path.join(dest, 'bin', 'java'))
-
- config = hookenv.config()
-
- # Unpack the latest tarball if necessary.
- if config.get('oracle_jre_tarball', '') == tarball and jre_exists:
- hookenv.log('Already installed {}'.format(tarball))
- else:
- hookenv.log('Unpacking {}'.format(tarball))
- subprocess.check_call(['tar', '-xz', '-C', dest,
- '--strip-components=1', '-f', tarball])
- config['oracle_jre_tarball'] = tarball
-
- # Set alternatives, so /usr/bin/java does what we want.
- for tool in ['java', 'javac']:
- tool_path = os.path.join(dest, 'bin', tool)
- subprocess.check_call(['update-alternatives', '--install',
- os.path.join('/usr/bin', tool),
- tool, tool_path, '1'])
- subprocess.check_call(['update-alternatives',
- '--set', tool, tool_path])
-
-
-@action
-def install_oracle_jre():
- if helpers.get_jre() != 'oracle':
- return
-
- tarball = _fetch_oracle_jre()
- _install_oracle_jre_tarball(tarball)
-
-
-@action
-def emit_java_version():
- # Log the version for posterity. Could be useful since Oracle JRE
- # security updates are not automated.
- version = subprocess.check_output(['java', '-version'],
- universal_newlines=True)
- for line in version.splitlines():
- hookenv.log('JRE: {}'.format(line))
-
-
-@action
-def emit_meminfo():
- helpers.emit(subprocess.check_output(['free', '--human'],
- universal_newlines=True))
-
-
-@action
-def configure_cassandra_yaml():
- helpers.configure_cassandra_yaml()
-
-
-@action
-def configure_cassandra_env():
- cassandra_env_path = helpers.get_cassandra_env_file()
- assert os.path.exists(cassandra_env_path)
-
- helpers.maybe_backup(cassandra_env_path)
-
- overrides = [
- ('max_heap_size', re.compile(r'^#?(MAX_HEAP_SIZE)=(.*)$', re.M)),
- ('heap_newsize', re.compile(r'^#?(HEAP_NEWSIZE)=(.*)$', re.M)),
- # We don't allow this to be overridden to ensure that tools
- # will find JMX using the default port.
- # ('jmx_port', re.compile(r'^#?(JMX_PORT)=(.*)$', re.M)),
- ]
-
- with open(cassandra_env_path, 'r') as f:
- env = f.read()
-
- config = hookenv.config()
- for key, regexp in overrides:
- if config[key]:
- val = shlex.quote(str(config[key]))
- env = regexp.sub(r'\g<1>={}'.format(val),
- env)
- else:
- env = regexp.sub(r'#\1=\2', env)
- host.write_file(cassandra_env_path, env.encode('UTF-8'))
-
-
-@action
-def configure_cassandra_rackdc():
- config = hookenv.config()
- datacenter = config['datacenter'].strip()
- rack = config['rack'].strip() or hookenv.service_name()
- rackdc_properties = dedent('''\
- dc={}
- rack={}
- ''').format(datacenter, rack)
- rackdc_path = helpers.get_cassandra_rackdc_file()
- host.write_file(rackdc_path, rackdc_properties.encode('UTF-8'))
-
-
-def needs_reset_auth_keyspace_replication():
- '''Guard for reset_auth_keyspace_replication.'''
- num_nodes = helpers.num_nodes()
- datacenter = hookenv.config()['datacenter']
- with helpers.connect() as session:
- strategy_opts = helpers.get_auth_keyspace_replication(session)
- rf = int(strategy_opts.get(datacenter, -1))
- hookenv.log('system_auth rf={!r}'.format(strategy_opts))
- # If the node count has changed, we should change the rf.
- return rf != num_nodes
-
-
-@leader_only
-@action
-@authentication
-@coordinator.require('repair', needs_reset_auth_keyspace_replication)
-def reset_auth_keyspace_replication():
- # Cassandra requires you to manually set the replication factor of
- # the system_auth keyspace, to ensure availability and redundancy.
- # The recommendation is to set the replication factor so that every
- # node has a copy.
- num_nodes = helpers.num_nodes()
- datacenter = hookenv.config()['datacenter']
- with helpers.connect() as session:
- strategy_opts = helpers.get_auth_keyspace_replication(session)
- rf = int(strategy_opts.get(datacenter, -1))
- hookenv.log('system_auth rf={!r}'.format(strategy_opts))
- if rf != num_nodes:
- strategy_opts['class'] = 'NetworkTopologyStrategy'
- strategy_opts[datacenter] = num_nodes
- if 'replication_factor' in strategy_opts:
- del strategy_opts['replication_factor']
- helpers.set_auth_keyspace_replication(session, strategy_opts)
- if rf < num_nodes:
- # Increasing rf, need to run repair.
- helpers.repair_auth_keyspace()
- helpers.set_active()
-
-
-@action
-def store_unit_private_ip():
- '''Store the unit's private ip address, so we can tell if it changes.'''
- hookenv.config()['unit_private_ip'] = hookenv.unit_private_ip()
-
-
-def needs_restart():
- '''Return True if Cassandra is not running or needs to be restarted.'''
- if helpers.is_decommissioned():
- # Decommissioned nodes are never restarted. They remain up
- # telling everyone they are decommissioned.
- helpers.status_set('blocked', 'Decommissioned node')
- return False
-
- if not helpers.is_cassandra_running():
- if helpers.is_bootstrapped():
- helpers.status_set('waiting', 'Waiting for permission to start')
- else:
- helpers.status_set('waiting',
- 'Waiting for permission to bootstrap')
- return True
-
- config = hookenv.config()
-
- # If our IP address has changed, we need to restart.
- if config.changed('unit_private_ip'):
- helpers.status_set('waiting', 'IP address changed. '
- 'Waiting for restart permission.')
- return True
-
- # If the directory paths have changed, we need to migrate data
- # during a restart.
- storage = relations.StorageRelation()
- if storage.needs_remount():
- helpers.status_set(hookenv.status_get()[0],
- 'New mounts. Waiting for restart permission')
- return True
-
- # If any of these config items changed, a restart is required.
- for key in RESTART_REQUIRED_KEYS:
- if config.changed(key):
- hookenv.log('{} changed. Restart required.'.format(key))
- for key in RESTART_REQUIRED_KEYS:
- if config.changed(key):
- helpers.status_set(hookenv.status_get()[0],
- 'Config changes. '
- 'Waiting for restart permission.')
- return True
-
- # If we have new seeds, we should restart.
- new_seeds = helpers.get_seed_ips()
- if config.get('configured_seeds') != sorted(new_seeds):
- old_seeds = set(config.previous('configured_seeds') or [])
- changed = old_seeds.symmetric_difference(new_seeds)
- # We don't care about the local node in the changes.
- changed.discard(hookenv.unit_private_ip())
- if changed:
- helpers.status_set(hookenv.status_get()[0],
- 'Updated seeds {!r}. '
- 'Waiting for restart permission.'
- ''.format(new_seeds))
- return True
-
- hookenv.log('Restart not required')
- return False
-
-
-@action
-@coordinator.require('restart', needs_restart)
-def maybe_restart():
- '''Restart sequence.
-
- If a restart is needed, shutdown Cassandra, perform all pending operations
- that cannot be be done while Cassandra is live, and restart it.
- '''
- helpers.status_set('maintenance', 'Stopping Cassandra')
- helpers.stop_cassandra()
- helpers.remount_cassandra()
- helpers.ensure_database_directories()
- if helpers.peer_relid() and not helpers.is_bootstrapped():
- helpers.status_set('maintenance', 'Bootstrapping')
- else:
- helpers.status_set('maintenance', 'Starting Cassandra')
- helpers.start_cassandra()
-
-
-@action
-def post_bootstrap():
- '''Maintain state on if the node has bootstrapped into the cluster.
-
- Per documented procedure for adding new units to a cluster, wait 2
- minutes if the unit has just bootstrapped to ensure other units
- do not attempt bootstrap too soon. Also, wait until completed joining
- to ensure we keep the lock and ensure other nodes don't restart or
- bootstrap.
- '''
- if not helpers.is_bootstrapped():
- if coordinator.relid is not None:
- helpers.status_set('maintenance', 'Post-bootstrap 2 minute delay')
- hookenv.log('Post-bootstrap 2 minute delay')
- time.sleep(120) # Must wait 2 minutes between bootstrapping nodes.
-
- join_msg_set = False
- while True:
- status = helpers.get_node_status()
- if status == 'NORMAL':
- break
- elif status == 'JOINING':
- if not join_msg_set:
- helpers.status_set('maintenance', 'Still joining cluster')
- join_msg_set = True
- time.sleep(10)
- continue
- else:
- if status is None:
- helpers.status_set('blocked',
- 'Unexpectedly shutdown during '
- 'bootstrap')
- else:
- helpers.status_set('blocked',
- 'Failed to bootstrap ({})'
- ''.format(status))
- raise SystemExit(0)
-
- # Unconditionally call this to publish the bootstrapped flag to
- # the peer relation, as the first unit was bootstrapped before
- # the peer relation existed.
- helpers.set_bootstrapped()
-
-
-@action
-def stop_cassandra():
- helpers.stop_cassandra()
-
-
-@action
-def start_cassandra():
- helpers.start_cassandra()
-
-
-@leader_only
-@action
-@authentication
-def create_unit_superusers():
- # The leader creates and updates accounts for nodes, using the
- # encrypted password they provide in relations.PeerRelation. We
- # don't end up with unencrypted passwords leaving the unit, and we
- # don't need to restart Cassandra in no-auth mode which is slow and
- # I worry may cause issues interrupting the bootstrap.
- if not coordinator.relid:
- return # No peer relation, no requests yet.
-
- created_units = helpers.get_unit_superusers()
- uncreated_units = [u for u in hookenv.related_units(coordinator.relid)
- if u not in created_units]
- for peer in uncreated_units:
- rel = hookenv.relation_get(unit=peer, rid=coordinator.relid)
- username = rel.get('username')
- pwhash = rel.get('pwhash')
- if not username:
- continue
- hookenv.log('Creating {} account for {}'.format(username, peer))
- with helpers.connect() as session:
- helpers.ensure_user(session, username, pwhash, superuser=True)
- created_units.add(peer)
- helpers.set_unit_superusers(created_units)
-
-
-@action
-def reset_all_io_schedulers():
- dirs = helpers.get_all_database_directories()
- dirs = (dirs['data_file_directories'] + [dirs['commitlog_directory']] +
- [dirs['saved_caches_directory']])
- config = hookenv.config()
- for d in dirs:
- if os.path.isdir(d): # Directory may not exist yet.
- helpers.set_io_scheduler(config['io_scheduler'], d)
-
-
-def _client_credentials(relid):
- '''Return the client credentials used by relation relid.'''
- relinfo = hookenv.relation_get(unit=hookenv.local_unit(), rid=relid)
- username = relinfo.get('username')
- password = relinfo.get('password')
- if username is None or password is None:
- for unit in hookenv.related_units(coordinator.relid):
- try:
- relinfo = hookenv.relation_get(unit=unit, rid=relid)
- username = relinfo.get('username')
- password = relinfo.get('password')
- if username is not None and password is not None:
- return username, password
- except subprocess.CalledProcessError:
- pass # Assume the remote unit has not joined yet.
- return None, None
- else:
- return username, password
-
-
-def _publish_database_relation(relid, superuser):
- # The Casandra service needs to provide a common set of credentials
- # to a client unit. The leader creates these, if none of the other
- # units are found to have published them already (a previously elected
- # leader may have done this). The leader then tickles the other units,
- # firing a hook and giving them the opportunity to copy and publish
- # these credentials.
- username, password = _client_credentials(relid)
- if username is None:
- if hookenv.is_leader():
- # Credentials not set. The leader must generate them. We use
- # the service name so that database permissions remain valid
- # even after the relation is dropped and recreated, or the
- # juju environment rebuild and the database restored from
- # backups.
- service_name = helpers.get_service_name(relid)
- if not service_name:
- # Per Bug #1555261, we might not yet have related units,
- # so no way to calculate the remote service name and thus
- # the user.
- return # Try again later.
- username = 'juju_{}'.format(helpers.get_service_name(relid))
- if superuser:
- username += '_admin'
- password = host.pwgen()
- pwhash = helpers.encrypt_password(password)
- with helpers.connect() as session:
- helpers.ensure_user(session, username, pwhash, superuser)
- # Wake the peers, if any.
- helpers.leader_ping()
- else:
- return # No credentials yet. Nothing to do.
-
- # Publish the information the client needs on the relation where
- # they can find it.
- # - authentication credentials
- # - address and port
- # - cluster_name, so clients can differentiate multiple clusters
- # - datacenter + rack, so clients know what names they can use
- # when altering keyspace replication settings.
- config = hookenv.config()
- hookenv.relation_set(relid,
- username=username, password=password,
- host=hookenv.unit_public_ip(),
- native_transport_port=config['native_transport_port'],
- rpc_port=config['rpc_port'],
- cluster_name=config['cluster_name'],
- datacenter=config['datacenter'],
- rack=config['rack'])
-
-
-@action
-def publish_database_relations():
- for relid in hookenv.relation_ids('database'):
- _publish_database_relation(relid, superuser=False)
-
-
-@action
-def publish_database_admin_relations():
- for relid in hookenv.relation_ids('database-admin'):
- _publish_database_relation(relid, superuser=True)
-
-
-@action
-def install_maintenance_crontab():
- # Every unit should run repair once per week (at least once per
- # GCGraceSeconds, which defaults to 10 days but can be changed per
- # keyspace). # Distribute the repair time evenly over the week.
- unit_num = int(hookenv.local_unit().split('/')[-1])
- dow, hour, minute = helpers.week_spread(unit_num)
- contents = jinja.render('cassandra_maintenance_cron.tmpl', vars())
- cron_path = "/etc/cron.d/cassandra-maintenance"
- host.write_file(cron_path, contents.encode('US-ASCII'))
-
-
-@action
-def emit_cluster_info():
- helpers.emit_describe_cluster()
- helpers.emit_status()
- helpers.emit_netstats()
-
-
-@action
-def configure_firewall():
- '''Configure firewall rules using ufw.
-
- This is primarily to block access to the replication and JMX ports,
- as juju's default port access controls are not strict enough and
- allow access to the entire environment.
- '''
- config = hookenv.config()
- ufw.enable(soft_fail=True)
-
- # Enable SSH from anywhere, relying on Juju and external firewalls
- # to control access.
- ufw.service('ssh', 'open')
- ufw.service('nrpe', 'open') # Also NRPE for nagios checks.
- ufw.service('rsync', 'open') # Also rsync for data transfer and backups.
-
- # Clients need client access. These protocols are configured to
- # require authentication.
- client_keys = ['native_transport_port', 'rpc_port']
- client_ports = [config[key] for key in client_keys]
-
- # Peers need replication access. This protocols does not
- # require authentication, so firewall it from other nodes.
- peer_ports = [config['storage_port'], config['ssl_storage_port']]
-
- # Enable client access from anywhere. Juju and external firewalls
- # can still restrict this further of course (ie. 'juju expose').
- for key in client_keys:
- if config.changed(key) and config.previous(key) is not None:
- # First close old ports. We use this order in the unlikely case
- # someone is trying to swap the native and Thrift ports.
- ufw.service(config.previous(key), 'close')
- for port in client_ports:
- # Then open or close the configured ports.
- ufw.service(port, 'open')
-
- desired_rules = set() # ufw.grant_access/remove_access commands.
-
- # Rules for peers
- for relinfo in hookenv.relations_of_type('cluster'):
- if relinfo['private-address']:
- pa = hookenv._ensure_ip(relinfo['private-address'])
- for port in peer_ports:
- desired_rules.add((pa, 'any', port))
- # Rules for admin connections. We allow database-admin relations access
- # to the cluster communication ports so that tools like sstableloader
- # can run.
- for relinfo in hookenv.relations_of_type('database-admin'):
- if relinfo['private-address']:
- pa = hookenv._ensure_ip(relinfo['private-address'])
- for port in peer_ports:
- desired_rules.add((pa, 'any', port))
-
- previous_rules = set(tuple(rule) for rule in config.get('ufw_rules', []))
-
- # Close any rules previously opened that are no longer desired.
- for rule in sorted(list(previous_rules - desired_rules)):
- ufw.revoke_access(*rule)
-
- # Open all the desired rules.
- for rule in sorted(list(desired_rules)):
- ufw.grant_access(*rule)
-
- # Store our rules for next time. Note that this is inherantly racy -
- # this value is only persisted if the hook exits cleanly. If the
- # hook fails, then someone changes port configuration or IP
- # addresses change, then the failed hook retried, we can lose track
- # of previously granted rules and they will never be revoked. It is
- # impossible to remove this race entirely, so we stick with this
- # simple approach.
- config['ufw_rules'] = list(desired_rules) # A list because JSON.
-
-
-@action
-def nrpe_external_master_relation():
- ''' Configure the nrpe-external-master relation '''
- local_plugins = helpers.local_plugins_dir()
- if os.path.exists(local_plugins):
- src = os.path.join(hookenv.charm_dir(),
- "files", "check_cassandra_heap.sh")
- with open(src, 'rb') as f:
- host.write_file(os.path.join(local_plugins,
- 'check_cassandra_heap.sh'),
- f.read(), perms=0o555)
-
- nrpe_compat = nrpe.NRPE()
- conf = hookenv.config()
-
- cassandra_heap_warn = conf.get('nagios_heapchk_warn_pct')
- cassandra_heap_crit = conf.get('nagios_heapchk_crit_pct')
- if cassandra_heap_warn and cassandra_heap_crit:
- nrpe_compat.add_check(
- shortname="cassandra_heap",
- description="Check Cassandra Heap",
- check_cmd="check_cassandra_heap.sh localhost {} {}"
- "".format(cassandra_heap_warn, cassandra_heap_crit))
-
- cassandra_disk_warn = conf.get('nagios_disk_warn_pct')
- cassandra_disk_crit = conf.get('nagios_disk_crit_pct')
- dirs = helpers.get_all_database_directories()
- dirs = set(dirs['data_file_directories'] +
- [dirs['commitlog_directory'], dirs['saved_caches_directory']])
- # We need to check the space on the mountpoint, not on the actual
- # directory, as the nagios user won't have access to the actual directory.
- mounts = set(helpers.mountpoint(d) for d in dirs)
- for disk in mounts:
- check_name = re.sub('[^A-Za-z0-9_]', '_', disk)
- if cassandra_disk_warn and cassandra_disk_crit:
- shortname = "cassandra_disk{}".format(check_name)
- hookenv.log("Adding disk utilization check {}".format(shortname),
- DEBUG)
- nrpe_compat.add_check(
- shortname=shortname,
- description="Check Cassandra Disk {}".format(disk),
- check_cmd="check_disk -u GB -w {}% -c {}% -K 5% -p {}"
- "".format(cassandra_disk_warn, cassandra_disk_crit,
- disk))
- nrpe_compat.write()
-
-
-@leader_only
-@action
-def maintain_seeds():
- '''The leader needs to maintain the list of seed nodes'''
- seed_ips = helpers.get_seed_ips()
- hookenv.log('Current seeds == {!r}'.format(seed_ips), DEBUG)
-
- bootstrapped_ips = helpers.get_bootstrapped_ips()
- hookenv.log('Bootstrapped == {!r}'.format(bootstrapped_ips), DEBUG)
-
- # Remove any seeds that are no longer bootstrapped, such as dropped
- # units.
- seed_ips.intersection_update(bootstrapped_ips)
-
- # Add more bootstrapped nodes, if necessary, to get to our maximum
- # of 3 seeds.
- potential_seed_ips = list(reversed(sorted(bootstrapped_ips)))
- while len(seed_ips) < 3 and potential_seed_ips:
- seed_ips.add(potential_seed_ips.pop())
-
- # If there are no seeds or bootstrapped nodes, start with the leader. Us.
- if len(seed_ips) == 0:
- seed_ips.add(hookenv.unit_private_ip())
-
- hookenv.log('Updated seeds == {!r}'.format(seed_ips), DEBUG)
-
- hookenv.leader_set(seeds=','.join(sorted(seed_ips)))
-
-
-@leader_only
-@action
-@authentication
-def reset_default_password():
- if hookenv.leader_get('default_admin_password_changed'):
- hookenv.log('Default admin password already changed')
- return
-
- # Cassandra ships with well known credentials, rather than
- # providing a tool to reset credentials. This is a huge security
- # hole we must close.
- try:
- # We need a big timeout here, as the cassandra user actually
- # springs into existence some time after Cassandra has started
- # up and is accepting connections.
- with helpers.connect('cassandra', 'cassandra',
- timeout=120, auth_timeout=120) as session:
- # But before we close this security hole, we need to use these
- # credentials to create a different admin account for the
- # leader, allowing it to create accounts for other nodes as they
- # join. The alternative is restarting Cassandra without
- # authentication, which this charm will likely need to do in the
- # future when we allow Cassandra services to be related together.
- helpers.status_set('maintenance',
- 'Creating initial superuser account')
- username, password = helpers.superuser_credentials()
- pwhash = helpers.encrypt_password(password)
- helpers.ensure_user(session, username, pwhash, superuser=True)
- helpers.set_unit_superusers([hookenv.local_unit()])
-
- helpers.status_set('maintenance',
- 'Changing default admin password')
- helpers.query(session, 'ALTER USER cassandra WITH PASSWORD %s',
- cassandra.ConsistencyLevel.ALL, (host.pwgen(),))
- except cassandra.AuthenticationFailed:
- hookenv.log('Default superuser account already reset')
- try:
- with helpers.connect():
- hookenv.log("Leader's superuser account already created")
- except cassandra.AuthenticationFailed:
- # We have no known superuser credentials. Create the account
- # the hard, slow way. This will be the normal method
- # of creating the service's initial account when we allow
- # services to be related together.
- helpers.create_unit_superuser_hard()
-
- hookenv.leader_set(default_admin_password_changed=True)
-
-
-@action
-def set_active():
- # If we got this far, the unit is active. Update the status if it is
- # not already active. We don't do this unconditionally, as the charm
- # may be active but doing stuff, like active but waiting for restart
- # permission.
- if hookenv.status_get()[0] != 'active':
- helpers.set_active()
- else:
- hookenv.log('Unit status already active', DEBUG)
-
-
-@action
-@authentication
-def request_unit_superuser():
- relid = helpers.peer_relid()
- if relid is None:
- hookenv.log('Request deferred until peer relation exists')
- return
-
- relinfo = hookenv.relation_get(unit=hookenv.local_unit(),
- rid=relid)
- if relinfo and relinfo.get('username'):
- # We must avoid blindly setting the pwhash on the relation,
- # as we will likely get a different value everytime we
- # encrypt the password due to the random salt.
- hookenv.log('Superuser account request previously made')
- else:
- # Publish the requested superuser and hash to our peers.
- username, password = helpers.superuser_credentials()
- pwhash = helpers.encrypt_password(password)
- hookenv.relation_set(relid, username=username, pwhash=pwhash)
- hookenv.log('Requested superuser account creation')
-
-
-@action
-def update_etc_hosts():
- hostname = socket.gethostname()
- addr = hookenv.unit_private_ip()
- hosts_map = {addr: hostname}
- # only need to add myself to /etc/hosts
- helpers.update_hosts_file('/etc/hosts', hosts_map)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py
deleted file mode 100644
index 1d039ea..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-import time
-import os
-from distutils.spawn import find_executable
-
-from charmhelpers.core.hookenv import (
- in_relation_hook,
- relation_ids,
- relation_set,
- relation_get,
-)
-
-
-def action_set(key, val):
- if find_executable('action-set'):
- action_cmd = ['action-set']
-
- if isinstance(val, dict):
- for k, v in iter(val.items()):
- action_set('%s.%s' % (key, k), v)
- return True
-
- action_cmd.append('%s=%s' % (key, val))
- subprocess.check_call(action_cmd)
- return True
- return False
-
-
-class Benchmark():
- """
- Helper class for the `benchmark` interface.
-
- :param list actions: Define the actions that are also benchmarks
-
- From inside the benchmark-relation-changed hook, you would
- Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])
-
- Examples:
-
- siege = Benchmark(['siege'])
- siege.start()
- [... run siege ...]
- # The higher the score, the better the benchmark
- siege.set_composite_score(16.70, 'trans/sec', 'desc')
- siege.finish()
-
-
- """
-
- BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing
-
- required_keys = [
- 'hostname',
- 'port',
- 'graphite_port',
- 'graphite_endpoint',
- 'api_port'
- ]
-
- def __init__(self, benchmarks=None):
- if in_relation_hook():
- if benchmarks is not None:
- for rid in sorted(relation_ids('benchmark')):
- relation_set(relation_id=rid, relation_settings={
- 'benchmarks': ",".join(benchmarks)
- })
-
- # Check the relation data
- config = {}
- for key in self.required_keys:
- val = relation_get(key)
- if val is not None:
- config[key] = val
- else:
- # We don't have all of the required keys
- config = {}
- break
-
- if len(config):
- with open(self.BENCHMARK_CONF, 'w') as f:
- for key, val in iter(config.items()):
- f.write("%s=%s\n" % (key, val))
-
- @staticmethod
- def start():
- action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
-
- """
- If the collectd charm is also installed, tell it to send a snapshot
- of the current profile data.
- """
- COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
- if os.path.exists(COLLECT_PROFILE_DATA):
- subprocess.check_output([COLLECT_PROFILE_DATA])
-
- @staticmethod
- def finish():
- action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
-
- @staticmethod
- def set_composite_score(value, units, direction='asc'):
- """
- Set the composite score for a benchmark run. This is a single number
- representative of the benchmark results. This could be the most
- important metric, or an amalgamation of metric scores.
- """
- return action_set(
- "meta.composite",
- {'value': value, 'units': units, 'direction': direction}
- )
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index 2f24642..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Compatibility with the nrpe-external-master charm"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import subprocess
-import pwd
-import grp
-import os
-import glob
-import shutil
-import re
-import shlex
-import yaml
-
-from charmhelpers.core.hookenv import (
- config,
- local_unit,
- log,
- relation_ids,
- relation_set,
- relations_of_type,
-)
-
-from charmhelpers.core.host import service
-
-# This module adds compatibility with the nrpe-external-master and plain nrpe
-# subordinate charms. To use it in your charm:
-#
-# 1. Update metadata.yaml
-#
-# provides:
-# (...)
-# nrpe-external-master:
-# interface: nrpe-external-master
-# scope: container
-#
-# and/or
-#
-# provides:
-# (...)
-# local-monitors:
-# interface: local-monitors
-# scope: container
-
-#
-# 2. Add the following to config.yaml
-#
-# nagios_context:
-# default: "juju"
-# type: string
-# description: |
-# Used by the nrpe subordinate charms.
-# A string that will be prepended to instance name to set the host name
-# in nagios. So for instance the hostname would be something like:
-# juju-myservice-0
-# If you're running multiple environments with the same services in them
-# this allows you to differentiate between them.
-# nagios_servicegroups:
-# default: ""
-# type: string
-# description: |
-# A comma-separated list of nagios servicegroups.
-# If left empty, the nagios_context will be used as the servicegroup
-#
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
-#
-# 4. Update your hooks.py with something like this:
-#
-# from charmsupport.nrpe import NRPE
-# (...)
-# def update_nrpe_config():
-# nrpe_compat = NRPE()
-# nrpe_compat.add_check(
-# shortname = "myservice",
-# description = "Check MyService",
-# check_cmd = "check_http -w 2 -c 10 http://localhost"
-# )
-# nrpe_compat.add_check(
-# "myservice_other",
-# "Check for widget failures",
-# check_cmd = "/srv/myapp/scripts/widget_check"
-# )
-# nrpe_compat.write()
-#
-# def config_changed():
-# (...)
-# update_nrpe_config()
-#
-# def nrpe_external_master_relation_changed():
-# update_nrpe_config()
-#
-# def local_monitors_relation_changed():
-# update_nrpe_config()
-#
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
-# ln -s hooks.py local-monitors-relation-changed
-
-
-class CheckException(Exception):
- pass
-
-
-class Check(object):
- shortname_re = '[A-Za-z0-9-_]+$'
- service_template = ("""
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {{
- use active-service
- host_name {nagios_hostname}
- service_description {nagios_hostname}[{shortname}] """
- """{description}
- check_command check_nrpe!{command}
- servicegroups {nagios_servicegroup}
-}}
-""")
-
- def __init__(self, shortname, description, check_cmd):
- super(Check, self).__init__()
- # XXX: could be better to calculate this from the service name
- if not re.match(self.shortname_re, shortname):
- raise CheckException("shortname must match {}".format(
- Check.shortname_re))
- self.shortname = shortname
- self.command = "check_{}".format(shortname)
- # Note: a set of invalid characters is defined by the
- # Nagios server config
- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
- self.description = description
- self.check_cmd = self._locate_cmd(check_cmd)
-
- def _get_check_filename(self):
- return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
-
- def _get_service_filename(self, hostname):
- return os.path.join(NRPE.nagios_exportdir,
- 'service__{}_{}.cfg'.format(hostname, self.command))
-
- def _locate_cmd(self, check_cmd):
- search_path = (
- '/usr/lib/nagios/plugins',
- '/usr/local/lib/nagios/plugins',
- )
- parts = shlex.split(check_cmd)
- for path in search_path:
- if os.path.exists(os.path.join(path, parts[0])):
- command = os.path.join(path, parts[0])
- if len(parts) > 1:
- command += " " + " ".join(parts[1:])
- return command
- log('Check command not found: {}'.format(parts[0]))
- return ''
-
- def _remove_service_files(self):
- if not os.path.exists(NRPE.nagios_exportdir):
- return
- for f in os.listdir(NRPE.nagios_exportdir):
- if f.endswith('_{}.cfg'.format(self.command)):
- os.remove(os.path.join(NRPE.nagios_exportdir, f))
-
- def remove(self, hostname):
- nrpe_check_file = self._get_check_filename()
- if os.path.exists(nrpe_check_file):
- os.remove(nrpe_check_file)
- self._remove_service_files()
-
- def write(self, nagios_context, hostname, nagios_servicegroups):
- nrpe_check_file = self._get_check_filename()
- with open(nrpe_check_file, 'w') as nrpe_check_config:
- nrpe_check_config.write("# check {}\n".format(self.shortname))
- nrpe_check_config.write("command[{}]={}\n".format(
- self.command, self.check_cmd))
-
- if not os.path.exists(NRPE.nagios_exportdir):
- log('Not writing service config as {} is not accessible'.format(
- NRPE.nagios_exportdir))
- else:
- self.write_service_config(nagios_context, hostname,
- nagios_servicegroups)
-
- def write_service_config(self, nagios_context, hostname,
- nagios_servicegroups):
- self._remove_service_files()
-
- templ_vars = {
- 'nagios_hostname': hostname,
- 'nagios_servicegroup': nagios_servicegroups,
- 'description': self.description,
- 'shortname': self.shortname,
- 'command': self.command,
- }
- nrpe_service_text = Check.service_template.format(**templ_vars)
- nrpe_service_file = self._get_service_filename(hostname)
- with open(nrpe_service_file, 'w') as nrpe_service_config:
- nrpe_service_config.write(str(nrpe_service_text))
-
- def run(self):
- subprocess.call(self.check_cmd)
-
-
-class NRPE(object):
- nagios_logdir = '/var/log/nagios'
- nagios_exportdir = '/var/lib/nagios/export'
- nrpe_confdir = '/etc/nagios/nrpe.d'
-
- def __init__(self, hostname=None):
- super(NRPE, self).__init__()
- self.config = config()
- self.nagios_context = self.config['nagios_context']
- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
- self.nagios_servicegroups = self.config['nagios_servicegroups']
- else:
- self.nagios_servicegroups = self.nagios_context
- self.unit_name = local_unit().replace('/', '-')
- if hostname:
- self.hostname = hostname
- else:
- nagios_hostname = get_nagios_hostname()
- if nagios_hostname:
- self.hostname = nagios_hostname
- else:
- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
- self.checks = []
-
- def add_check(self, *args, **kwargs):
- self.checks.append(Check(*args, **kwargs))
-
- def remove_check(self, *args, **kwargs):
- if kwargs.get('shortname') is None:
- raise ValueError('shortname of check must be specified')
-
- # Use sensible defaults if they're not specified - these are not
- # actually used during removal, but they're required for constructing
- # the Check object; check_disk is chosen because it's part of the
- # nagios-plugins-basic package.
- if kwargs.get('check_cmd') is None:
- kwargs['check_cmd'] = 'check_disk'
- if kwargs.get('description') is None:
- kwargs['description'] = ''
-
- check = Check(*args, **kwargs)
- check.remove(self.hostname)
-
- def write(self):
- try:
- nagios_uid = pwd.getpwnam('nagios').pw_uid
- nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
- log("Nagios user not set up, nrpe checks not updated")
- return
-
- if not os.path.exists(NRPE.nagios_logdir):
- os.mkdir(NRPE.nagios_logdir)
- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
-
- nrpe_monitors = {}
- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
- for nrpecheck in self.checks:
- nrpecheck.write(self.nagios_context, self.hostname,
- self.nagios_servicegroups)
- nrpe_monitors[nrpecheck.shortname] = {
- "command": nrpecheck.command,
- }
-
- service('restart', 'nagios-nrpe-server')
-
- monitor_ids = relation_ids("local-monitors") + \
- relation_ids("nrpe-external-master")
- for rid in monitor_ids:
- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
-
-
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_host_context
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_host_context' in rel:
- return rel['nagios_host_context']
-
-
-def get_nagios_hostname(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_hostname
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_hostname' in rel:
- return rel['nagios_hostname']
-
-
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
- """
- Return the nagios unit name prepended with host_context if needed
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- host_context = get_nagios_hostcontext(relation_name)
- if host_context:
- unit = "%s:%s" % (host_context, local_unit())
- else:
- unit = local_unit()
- return unit
-
-
-def add_init_service_checks(nrpe, services, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param list services: List of services to check
- :param str unit_name: Unit name to use in check description
- """
- for svc in services:
- upstart_init = '/etc/init/%s.conf' % svc
- sysv_init = '/etc/init.d/%s' % svc
- if os.path.exists(upstart_init):
- # Don't add a check for these services from neutron-gateway
- if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_upstart_job %s' % svc
- )
- elif os.path.exists(sysv_init):
- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
- cron_file = ('*/5 * * * * root '
- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-s /etc/init.d/%s status > '
- '/var/lib/nagios/service-check-%s.txt\n' % (svc,
- svc)
- )
- f = open(cronpath, 'w')
- f.write(cron_file)
- f.close()
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_status_file.py -f '
- '/var/lib/nagios/service-check-%s.txt' % svc,
- )
-
-
-def copy_nrpe_checks():
- """
- Copy the nrpe checks into place
-
- """
- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
- nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
- 'charmhelpers', 'contrib', 'openstack',
- 'files')
-
- if not os.path.exists(NAGIOS_PLUGINS):
- os.makedirs(NAGIOS_PLUGINS)
- for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
- if os.path.isfile(fname):
- shutil.copy2(fname,
- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
-
-
-def add_haproxy_checks(nrpe, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param str unit_name: Unit name to use in check description
- """
- nrpe.add_check(
- shortname='haproxy_servers',
- description='Check HAProxy {%s}' % unit_name,
- check_cmd='check_haproxy.sh')
- nrpe.add_check(
- shortname='haproxy_queue',
- description='Check HAProxy queue depth {%s}' % unit_name,
- check_cmd='check_haproxy_queue_depth.sh')
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index 320961b..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Functions for managing volumes in juju units. One volume is supported per unit.
-Subordinates may have their own storage, provided it is on its own partition.
-
-Configuration stanzas::
-
- volume-ephemeral:
- type: boolean
- default: true
- description: >
- If false, a volume is mounted as sepecified in "volume-map"
- If true, ephemeral storage will be used, meaning that log data
- will only exist as long as the machine. YOU HAVE BEEN WARNED.
- volume-map:
- type: string
- default: {}
- description: >
- YAML map of units to device names, e.g:
- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
- Service units will raise a configure-error if volume-ephemeral
- is 'true' and no volume-map value is set. Use 'juju set' to set a
- value and 'juju resolved' to complete configuration.
-
-Usage::
-
- from charmsupport.volumes import configure_volume, VolumeConfigurationError
- from charmsupport.hookenv import log, ERROR
- def post_mount_hook():
- stop_service('myservice')
- def post_mount_hook():
- start_service('myservice')
-
- if __name__ == '__main__':
- try:
- configure_volume(before_change=pre_mount_hook,
- after_change=post_mount_hook)
- except VolumeConfigurationError:
- log('Storage could not be configured', ERROR)
-
-'''
-
-# XXX: Known limitations
-# - fstab is neither consulted nor updated
-
-import os
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-import yaml
-
-
-MOUNT_BASE = '/srv/juju/volumes'
-
-
-class VolumeConfigurationError(Exception):
- '''Volume configuration data is missing or invalid'''
- pass
-
-
-def get_config():
- '''Gather and sanity-check volume configuration data'''
- volume_config = {}
- config = hookenv.config()
-
- errors = False
-
- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
- volume_config['ephemeral'] = True
- else:
- volume_config['ephemeral'] = False
-
- try:
- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
- except yaml.YAMLError as e:
- hookenv.log("Error parsing YAML volume-map: {}".format(e),
- hookenv.ERROR)
- errors = True
- if volume_map is None:
- # probably an empty string
- volume_map = {}
- elif not isinstance(volume_map, dict):
- hookenv.log("Volume-map should be a dictionary, not {}".format(
- type(volume_map)))
- errors = True
-
- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
- if volume_config['device'] and volume_config['ephemeral']:
- # asked for ephemeral storage but also defined a volume ID
- hookenv.log('A volume is defined for this unit, but ephemeral '
- 'storage was requested', hookenv.ERROR)
- errors = True
- elif not volume_config['device'] and not volume_config['ephemeral']:
- # asked for permanent storage but did not define volume ID
- hookenv.log('Ephemeral storage was requested, but there is no volume '
- 'defined for this unit.', hookenv.ERROR)
- errors = True
-
- unit_mount_name = hookenv.local_unit().replace('/', '-')
- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
-
- if errors:
- return None
- return volume_config
-
-
-def mount_volume(config):
- if os.path.exists(config['mountpoint']):
- if not os.path.isdir(config['mountpoint']):
- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
- raise VolumeConfigurationError()
- else:
- host.mkdir(config['mountpoint'])
- if os.path.ismount(config['mountpoint']):
- unmount_volume(config)
- if not host.mount(config['device'], config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def unmount_volume(config):
- if os.path.ismount(config['mountpoint']):
- if not host.umount(config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def managed_mounts():
- '''List of all mounted managed volumes'''
- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
-
-
-def configure_volume(before_change=lambda: None, after_change=lambda: None):
- '''Set up storage (or don't) according to the charm's volume configuration.
- Returns the mount point or "ephemeral". before_change and after_change
- are optional functions to be called if the volume configuration changes.
- '''
-
- config = get_config()
- if not config:
- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
- raise VolumeConfigurationError()
-
- if config['ephemeral']:
- if os.path.ismount(config['mountpoint']):
- before_change()
- unmount_volume(config)
- after_change()
- return 'ephemeral'
- else:
- # persistent storage
- if os.path.ismount(config['mountpoint']):
- mounts = dict(managed_mounts())
- if mounts.get(config['mountpoint']) != config['device']:
- before_change()
- unmount_volume(config)
- mount_volume(config)
- after_change()
- else:
- before_change()
- mount_volume(config)
- after_change()
- return config['mountpoint']
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py
deleted file mode 100644
index b65d963..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module contains helpers to add and remove ufw rules.
-
-Examples:
-
-- open SSH port for subnet 10.0.3.0/24:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
-
-- open service by name as defined in /etc/services:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('ssh', 'open')
-
-- close service by port number:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('4949', 'close') # munin
-"""
-import re
-import os
-import subprocess
-
-from charmhelpers.core import hookenv
-from charmhelpers.core.kernel import modprobe, is_module_loaded
-
-__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
-
-
-class UFWError(Exception):
- pass
-
-
-class UFWIPv6Error(UFWError):
- pass
-
-
-def is_enabled():
- """
- Check if `ufw` is enabled
-
- :returns: True if ufw is enabled
- """
- output = subprocess.check_output(['ufw', 'status'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Status: active\n', output, re.M)
-
- return len(m) >= 1
-
-
-def is_ipv6_ok(soft_fail=False):
- """
- Check if IPv6 support is present and ip6tables functional
-
- :param soft_fail: If set to True and IPv6 support is broken, then reports
- that the host doesn't have IPv6 support, otherwise a
- UFWIPv6Error exception is raised.
- :returns: True if IPv6 is working, False otherwise
- """
-
- # do we have IPv6 in the machine?
- if os.path.isdir('/proc/sys/net/ipv6'):
- # is ip6tables kernel module loaded?
- if not is_module_loaded('ip6_tables'):
- # ip6tables support isn't complete, let's try to load it
- try:
- modprobe('ip6_tables')
- # great, we can load the module
- return True
- except subprocess.CalledProcessError as ex:
- hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
- level="WARN")
- # we are in a world where ip6tables isn't working
- if soft_fail:
- # so we inform that the machine doesn't have IPv6
- return False
- else:
- raise UFWIPv6Error("IPv6 firewall support broken")
- else:
- # the module is present :)
- return True
-
- else:
- # the system doesn't have IPv6
- return False
-
-
-def disable_ipv6():
- """
- Disable ufw IPv6 support in /etc/default/ufw
- """
- exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
- '/etc/default/ufw'])
- if exit_code == 0:
- hookenv.log('IPv6 support in ufw disabled', level='INFO')
- else:
- hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
- raise UFWError("Couldn't disable IPv6 support in ufw")
-
-
-def enable(soft_fail=False):
- """
- Enable ufw
-
- :param soft_fail: If set to True silently disables IPv6 support in ufw,
- otherwise a UFWIPv6Error exception is raised when IP6
- support is broken.
- :returns: True if ufw is successfully enabled
- """
- if is_enabled():
- return True
-
- if not is_ipv6_ok(soft_fail):
- disable_ipv6()
-
- output = subprocess.check_output(['ufw', 'enable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall('^Firewall is active and enabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be enabled", level='WARN')
- return False
- else:
- hookenv.log("ufw enabled", level='INFO')
- return True
-
-
-def disable():
- """
- Disable ufw
-
- :returns: True if ufw is successfully disabled
- """
- if not is_enabled():
- return True
-
- output = subprocess.check_output(['ufw', 'disable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Firewall stopped and disabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be disabled", level='WARN')
- return False
- else:
- hookenv.log("ufw disabled", level='INFO')
- return True
-
-
-def default_policy(policy='deny', direction='incoming'):
- """
- Changes the default policy for traffic `direction`
-
- :param policy: allow, deny or reject
- :param direction: traffic direction, possible values: incoming, outgoing,
- routed
- """
- if policy not in ['allow', 'deny', 'reject']:
- raise UFWError(('Unknown policy %s, valid values: '
- 'allow, deny, reject') % policy)
-
- if direction not in ['incoming', 'outgoing', 'routed']:
- raise UFWError(('Unknown direction %s, valid values: '
- 'incoming, outgoing, routed') % direction)
-
- output = subprocess.check_output(['ufw', 'default', policy, direction],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
- hookenv.log(output, level='DEBUG')
-
- m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
- policy),
- output, re.M)
- if len(m) == 0:
- hookenv.log("ufw couldn't change the default policy to %s for %s"
- % (policy, direction), level='WARN')
- return False
- else:
- hookenv.log("ufw default policy for %s changed to %s"
- % (direction, policy), level='INFO')
- return True
-
-
-def modify_access(src, dst='any', port=None, proto=None, action='allow',
- index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param action: `allow` or `delete`
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- if not is_enabled():
- hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
- return
-
- if action == 'delete':
- cmd = ['ufw', 'delete', 'allow']
- elif index is not None:
- cmd = ['ufw', 'insert', str(index), action]
- else:
- cmd = ['ufw', action]
-
- if src is not None:
- cmd += ['from', src]
-
- if dst is not None:
- cmd += ['to', dst]
-
- if port is not None:
- cmd += ['port', str(port)]
-
- if proto is not None:
- cmd += ['proto', proto]
-
- hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
-
- hookenv.log(stdout, level='INFO')
-
- if p.returncode != 0:
- hookenv.log(stderr, level='ERROR')
- hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
- p.returncode),
- level='ERROR')
-
-
-def grant_access(src, dst='any', port=None, proto=None, index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
- index=index)
-
-
-def revoke_access(src, dst='any', port=None, proto=None):
- """
- Revoke access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
-
-
-def service(name, action):
- """
- Open/close access to a service
-
- :param name: could be a service name defined in `/etc/services` or a port
- number.
- :param action: `open` or `close`
- """
- if action == 'open':
- subprocess.check_output(['ufw', 'allow', str(name)],
- universal_newlines=True)
- elif action == 'close':
- subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
- universal_newlines=True)
- else:
- raise UFWError(("'{}' not supported, use 'allow' "
- "or 'delete'").format(action))
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py
deleted file mode 100644
index c5efb16..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Templating using the python-jinja2 package.
-"""
-import six
-from charmhelpers.fetch import apt_install, apt_update
-try:
- import jinja2
-except ImportError:
- apt_update(fatal=True)
- if six.PY3:
- apt_install(["python3-jinja2"], fatal=True)
- else:
- apt_install(["python-jinja2"], fatal=True)
- import jinja2
-
-
-DEFAULT_TEMPLATES_DIR = 'templates'
-
-
-def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR):
- templates = jinja2.Environment(
- loader=jinja2.FileSystemLoader(template_dir))
- template = templates.get_template(template_name)
- return template.render(context)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py b/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py
deleted file mode 100644
index 0303c3f..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py
+++ /dev/null
@@ -1,607 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-'''
-The coordinator module allows you to use Juju's leadership feature to
-coordinate operations between units of a service.
-
-Behavior is defined in subclasses of coordinator.BaseCoordinator.
-One implementation is provided (coordinator.Serial), which allows an
-operation to be run on a single unit at a time, on a first come, first
-served basis. You can trivially define more complex behavior by
-subclassing BaseCoordinator or Serial.
-
-:author: Stuart Bishop <stuart.bishop@canonical.com>
-
-
-Services Framework Usage
-========================
-
-Ensure a peers relation is defined in metadata.yaml. Instantiate a
-BaseCoordinator subclass before invoking ServiceManager.manage().
-Ensure that ServiceManager.manage() is wired up to the leader-elected,
-leader-settings-changed, peers relation-changed and peers
-relation-departed hooks in addition to any other hooks you need, or your
-service will deadlock.
-
-Ensure calls to acquire() are guarded, so that locks are only requested
-when they are really needed (and thus hooks only triggered when necessary).
-Failing to do this and calling acquire() unconditionally will put your unit
-into a hook loop. Calls to granted() do not need to be guarded.
-
-For example::
-
- from charmhelpers.core import hookenv, services
- from charmhelpers import coordinator
-
- def maybe_restart(servicename):
- serial = coordinator.Serial()
- if needs_restart():
- serial.acquire('restart')
- if serial.granted('restart'):
- hookenv.service_restart(servicename)
-
- services = [dict(service='servicename',
- data_ready=[maybe_restart])]
-
- if __name__ == '__main__':
- _ = coordinator.Serial() # Must instantiate before manager.manage()
- manager = services.ServiceManager(services)
- manager.manage()
-
-
-You can implement a similar pattern using a decorator. If the lock has
-not been granted, an attempt to acquire() it will be made if the guard
-function returns True. If the lock has been granted, the decorated function
-is run as normal::
-
- from charmhelpers.core import hookenv, services
- from charmhelpers import coordinator
-
- serial = coordinator.Serial() # Global, instatiated on module import.
-
- def needs_restart():
- [ ... Introspect state. Return True if restart is needed ... ]
-
- @serial.require('restart', needs_restart)
- def maybe_restart(servicename):
- hookenv.service_restart(servicename)
-
- services = [dict(service='servicename',
- data_ready=[maybe_restart])]
-
- if __name__ == '__main__':
- manager = services.ServiceManager(services)
- manager.manage()
-
-
-Traditional Usage
-=================
-
-Ensure a peers relation is defined in metadata.yaml.
-
-If you are using charmhelpers.core.hookenv.Hooks, ensure that a
-BaseCoordinator subclass is instantiated before calling Hooks.execute.
-
-If you are not using charmhelpers.core.hookenv.Hooks, ensure
-that a BaseCoordinator subclass is instantiated and its handle()
-method called at the start of all your hooks.
-
-For example::
-
- import sys
- from charmhelpers.core import hookenv
- from charmhelpers import coordinator
-
- hooks = hookenv.Hooks()
-
- def maybe_restart():
- serial = coordinator.Serial()
- if serial.granted('restart'):
- hookenv.service_restart('myservice')
-
- @hooks.hook
- def config_changed():
- update_config()
- serial = coordinator.Serial()
- if needs_restart():
- serial.acquire('restart'):
- maybe_restart()
-
- # Cluster hooks must be wired up.
- @hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
- def cluster_relation_changed():
- maybe_restart()
-
- # Leader hooks must be wired up.
- @hooks.hook('leader-elected', 'leader-settings-changed')
- def leader_settings_changed():
- maybe_restart()
-
- [ ... repeat for *all* other hooks you are using ... ]
-
- if __name__ == '__main__':
- _ = coordinator.Serial() # Must instantiate before execute()
- hooks.execute(sys.argv)
-
-
-You can also use the require decorator. If the lock has not been granted,
-an attempt to acquire() it will be made if the guard function returns True.
-If the lock has been granted, the decorated function is run as normal::
-
- from charmhelpers.core import hookenv
-
- hooks = hookenv.Hooks()
- serial = coordinator.Serial() # Must instantiate before execute()
-
- @require('restart', needs_restart)
- def maybe_restart():
- hookenv.service_restart('myservice')
-
- @hooks.hook('install', 'config-changed', 'upgrade-charm',
- # Peers and leader hooks must be wired up.
- 'cluster-relation-changed', 'cluster-relation-departed',
- 'leader-elected', 'leader-settings-changed')
- def default_hook():
- [...]
- maybe_restart()
-
- if __name__ == '__main__':
- hooks.execute()
-
-
-Details
-=======
-
-A simple API is provided similar to traditional locking APIs. A lock
-may be requested using the acquire() method, and the granted() method
-may be used do to check if a lock previously requested by acquire() has
-been granted. It doesn't matter how many times acquire() is called in a
-hook.
-
-Locks are released at the end of the hook they are acquired in. This may
-be the current hook if the unit is leader and the lock is free. It is
-more likely a future hook (probably leader-settings-changed, possibly
-the peers relation-changed or departed hook, potentially any hook).
-
-Whenever a charm needs to perform a coordinated action it will acquire()
-the lock and perform the action immediately if acquisition is
-successful. It will also need to perform the same action in every other
-hook if the lock has been granted.
-
-
-Grubby Details
---------------
-
-Why do you need to be able to perform the same action in every hook?
-If the unit is the leader, then it may be able to grant its own lock
-and perform the action immediately in the source hook. If the unit is
-the leader and cannot immediately grant the lock, then its only
-guaranteed chance of acquiring the lock is in the peers relation-joined,
-relation-changed or peers relation-departed hooks when another unit has
-released it (the only channel to communicate to the leader is the peers
-relation). If the unit is not the leader, then it is unlikely the lock
-is granted in the source hook (a previous hook must have also made the
-request for this to happen). A non-leader is notified about the lock via
-leader settings. These changes may be visible in any hook, even before
-the leader-settings-changed hook has been invoked. Or the requesting
-unit may be promoted to leader after making a request, in which case the
-lock may be granted in leader-elected or in a future peers
-relation-changed or relation-departed hook.
-
-This could be simpler if leader-settings-changed was invoked on the
-leader. We could then never grant locks except in
-leader-settings-changed hooks giving one place for the operation to be
-performed. Unfortunately this is not the case with Juju 1.23 leadership.
-
-But of course, this doesn't really matter to most people as most people
-seem to prefer the Services Framework or similar reset-the-world
-approaches, rather than the twisty maze of attempting to deduce what
-should be done based on what hook happens to be running (which always
-seems to evolve into reset-the-world anyway when the charm grows beyond
-the trivial).
-
-I chose not to implement a callback model, where a callback was passed
-to acquire to be executed when the lock is granted, because the callback
-may become invalid between making the request and the lock being granted
-due to an upgrade-charm being run in the interim. And it would create
-restrictions, such no lambdas, callback defined at the top level of a
-module, etc. Still, we could implement it on top of what is here, eg.
-by adding a defer decorator that stores a pickle of itself to disk and
-have BaseCoordinator unpickle and execute them when the locks are granted.
-'''
-from datetime import datetime
-from functools import wraps
-import json
-import os.path
-
-from six import with_metaclass
-
-from charmhelpers.core import hookenv
-
-
-# We make BaseCoordinator and subclasses singletons, so that if we
-# need to spill to local storage then only a single instance does so,
-# rather than having multiple instances stomp over each other.
-class Singleton(type):
- _instances = {}
-
- def __call__(cls, *args, **kwargs):
- if cls not in cls._instances:
- cls._instances[cls] = super(Singleton, cls).__call__(*args,
- **kwargs)
- return cls._instances[cls]
-
-
-class BaseCoordinator(with_metaclass(Singleton, object)):
- relid = None # Peer relation-id, set by __init__
- relname = None
-
- grants = None # self.grants[unit][lock] == timestamp
- requests = None # self.requests[unit][lock] == timestamp
-
- def __init__(self, relation_key='coordinator', peer_relation_name=None):
- '''Instatiate a Coordinator.
-
- Data is stored on the peers relation and in leadership storage
- under the provided relation_key.
-
- The peers relation is identified by peer_relation_name, and defaults
- to the first one found in metadata.yaml.
- '''
- # Most initialization is deferred, since invoking hook tools from
- # the constructor makes testing hard.
- self.key = relation_key
- self.relname = peer_relation_name
- hookenv.atstart(self.initialize)
-
- # Ensure that handle() is called, without placing that burden on
- # the charm author. They still need to do this manually if they
- # are not using a hook framework.
- hookenv.atstart(self.handle)
-
- def initialize(self):
- if self.requests is not None:
- return # Already initialized.
-
- assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'
-
- if self.relname is None:
- self.relname = _implicit_peer_relation_name()
-
- relids = hookenv.relation_ids(self.relname)
- if relids:
- self.relid = sorted(relids)[0]
-
- # Load our state, from leadership, the peer relationship, and maybe
- # local state as a fallback. Populates self.requests and self.grants.
- self._load_state()
- self._emit_state()
-
- # Save our state if the hook completes successfully.
- hookenv.atexit(self._save_state)
-
- # Schedule release of granted locks for the end of the hook.
- # This needs to be the last of our atexit callbacks to ensure
- # it will be run first when the hook is complete, because there
- # is no point mutating our state after it has been saved.
- hookenv.atexit(self._release_granted)
-
- def acquire(self, lock):
- '''Acquire the named lock, non-blocking.
-
- The lock may be granted immediately, or in a future hook.
-
- Returns True if the lock has been granted. The lock will be
- automatically released at the end of the hook in which it is
- granted.
-
- Do not mindlessly call this method, as it triggers a cascade of
- hooks. For example, if you call acquire() every time in your
- peers relation-changed hook you will end up with an infinite loop
- of hooks. It should almost always be guarded by some condition.
- '''
- unit = hookenv.local_unit()
- ts = self.requests[unit].get(lock)
- if not ts:
- # If there is no outstanding request on the peers relation,
- # create one.
- self.requests.setdefault(lock, {})
- self.requests[unit][lock] = _timestamp()
- self.msg('Requested {}'.format(lock))
-
- # If the leader has granted the lock, yay.
- if self.granted(lock):
- self.msg('Acquired {}'.format(lock))
- return True
-
- # If the unit making the request also happens to be the
- # leader, it must handle the request now. Even though the
- # request has been stored on the peers relation, the peers
- # relation-changed hook will not be triggered.
- if hookenv.is_leader():
- return self.grant(lock, unit)
-
- return False # Can't acquire lock, yet. Maybe next hook.
-
- def granted(self, lock):
- '''Return True if a previously requested lock has been granted'''
- unit = hookenv.local_unit()
- ts = self.requests[unit].get(lock)
- if ts and self.grants.get(unit, {}).get(lock) == ts:
- return True
- return False
-
- def requested(self, lock):
- '''Return True if we are in the queue for the lock'''
- return lock in self.requests[hookenv.local_unit()]
-
- def request_timestamp(self, lock):
- '''Return the timestamp of our outstanding request for lock, or None.
-
- Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
- '''
- ts = self.requests[hookenv.local_unit()].get(lock, None)
- if ts is not None:
- return datetime.strptime(ts, _timestamp_format)
-
- def handle(self):
- if not hookenv.is_leader():
- return # Only the leader can grant requests.
-
- self.msg('Leader handling coordinator requests')
-
- # Clear our grants that have been released.
- for unit in self.grants.keys():
- for lock, grant_ts in list(self.grants[unit].items()):
- req_ts = self.requests.get(unit, {}).get(lock)
- if req_ts != grant_ts:
- # The request timestamp does not match the granted
- # timestamp. Several hooks on 'unit' may have run
- # before the leader got a chance to make a decision,
- # and 'unit' may have released its lock and attempted
- # to reacquire it. This will change the timestamp,
- # and we correctly revoke the old grant putting it
- # to the end of the queue.
- ts = datetime.strptime(self.grants[unit][lock],
- _timestamp_format)
- del self.grants[unit][lock]
- self.released(unit, lock, ts)
-
- # Grant locks
- for unit in self.requests.keys():
- for lock in self.requests[unit]:
- self.grant(lock, unit)
-
- def grant(self, lock, unit):
- '''Maybe grant the lock to a unit.
-
- The decision to grant the lock or not is made for $lock
- by a corresponding method grant_$lock, which you may define
- in a subclass. If no such method is defined, the default_grant
- method is used. See Serial.default_grant() for details.
- '''
- if not hookenv.is_leader():
- return False # Not the leader, so we cannot grant.
-
- # Set of units already granted the lock.
- granted = set()
- for u in self.grants:
- if lock in self.grants[u]:
- granted.add(u)
- if unit in granted:
- return True # Already granted.
-
- # Ordered list of units waiting for the lock.
- reqs = set()
- for u in self.requests:
- if u in granted:
- continue # In the granted set. Not wanted in the req list.
- for l, ts in self.requests[u].items():
- if l == lock:
- reqs.add((ts, u))
- queue = [t[1] for t in sorted(reqs)]
- if unit not in queue:
- return False # Unit has not requested the lock.
-
- # Locate custom logic, or fallback to the default.
- grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
-
- if grant_func(lock, unit, granted, queue):
- # Grant the lock.
- self.msg('Leader grants {} to {}'.format(lock, unit))
- self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
- return True
-
- return False
-
- def released(self, unit, lock, timestamp):
- '''Called on the leader when it has released a lock.
-
- By default, does nothing but log messages. Override if you
- need to perform additional housekeeping when a lock is released,
- for example recording timestamps.
- '''
- interval = _utcnow() - timestamp
- self.msg('Leader released {} from {}, held {}'.format(lock, unit,
- interval))
-
- def require(self, lock, guard_func, *guard_args, **guard_kw):
- """Decorate a function to be run only when a lock is acquired.
-
- The lock is requested if the guard function returns True.
-
- The decorated function is called if the lock has been granted.
- """
- def decorator(f):
- @wraps(f)
- def wrapper(*args, **kw):
- if self.granted(lock):
- self.msg('Granted {}'.format(lock))
- return f(*args, **kw)
- if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
- return f(*args, **kw)
- return None
- return wrapper
- return decorator
-
- def msg(self, msg):
- '''Emit a message. Override to customize log spam.'''
- hookenv.log('coordinator.{} {}'.format(self._name(), msg),
- level=hookenv.INFO)
-
- def _name(self):
- return self.__class__.__name__
-
- def _load_state(self):
- self.msg('Loading state'.format(self._name()))
-
- # All responses must be stored in the leadership settings.
- # The leader cannot use local state, as a different unit may
- # be leader next time. Which is fine, as the leadership
- # settings are always available.
- self.grants = json.loads(hookenv.leader_get(self.key) or '{}')
-
- local_unit = hookenv.local_unit()
-
- # All requests must be stored on the peers relation. This is
- # the only channel units have to communicate with the leader.
- # Even the leader needs to store its requests here, as a
- # different unit may be leader by the time the request can be
- # granted.
- if self.relid is None:
- # The peers relation is not available. Maybe we are early in
- # the units's lifecycle. Maybe this unit is standalone.
- # Fallback to using local state.
- self.msg('No peer relation. Loading local state')
- self.requests = {local_unit: self._load_local_state()}
- else:
- self.requests = self._load_peer_state()
- if local_unit not in self.requests:
- # The peers relation has just been joined. Update any state
- # loaded from our peers with our local state.
- self.msg('New peer relation. Merging local state')
- self.requests[local_unit] = self._load_local_state()
-
- def _emit_state(self):
- # Emit this units lock status.
- for lock in sorted(self.requests[hookenv.local_unit()].keys()):
- if self.granted(lock):
- self.msg('Granted {}'.format(lock))
- else:
- self.msg('Waiting on {}'.format(lock))
-
- def _save_state(self):
- self.msg('Publishing state'.format(self._name()))
- if hookenv.is_leader():
- # sort_keys to ensure stability.
- raw = json.dumps(self.grants, sort_keys=True)
- hookenv.leader_set({self.key: raw})
-
- local_unit = hookenv.local_unit()
-
- if self.relid is None:
- # No peers relation yet. Fallback to local state.
- self.msg('No peer relation. Saving local state')
- self._save_local_state(self.requests[local_unit])
- else:
- # sort_keys to ensure stability.
- raw = json.dumps(self.requests[local_unit], sort_keys=True)
- hookenv.relation_set(self.relid, relation_settings={self.key: raw})
-
- def _load_peer_state(self):
- requests = {}
- units = set(hookenv.related_units(self.relid))
- units.add(hookenv.local_unit())
- for unit in units:
- raw = hookenv.relation_get(self.key, unit, self.relid)
- if raw:
- requests[unit] = json.loads(raw)
- return requests
-
- def _local_state_filename(self):
- # Include the class name. We allow multiple BaseCoordinator
- # subclasses to be instantiated, and they are singletons, so
- # this avoids conflicts (unless someone creates and uses two
- # BaseCoordinator subclasses with the same class name, so don't
- # do that).
- return '.charmhelpers.coordinator.{}'.format(self._name())
-
- def _load_local_state(self):
- fn = self._local_state_filename()
- if os.path.exists(fn):
- with open(fn, 'r') as f:
- return json.load(f)
- return {}
-
- def _save_local_state(self, state):
- fn = self._local_state_filename()
- with open(fn, 'w') as f:
- json.dump(state, f)
-
- def _release_granted(self):
- # At the end of every hook, release all locks granted to
- # this unit. If a hook neglects to make use of what it
- # requested, it will just have to make the request again.
- # Implicit release is the only way this will work, as
- # if the unit is standalone there may be no future triggers
- # called to do a manual release.
- unit = hookenv.local_unit()
- for lock in list(self.requests[unit].keys()):
- if self.granted(lock):
- self.msg('Released local {} lock'.format(lock))
- del self.requests[unit][lock]
-
-
-class Serial(BaseCoordinator):
- def default_grant(self, lock, unit, granted, queue):
- '''Default logic to grant a lock to a unit. Unless overridden,
- only one unit may hold the lock and it will be granted to the
- earliest queued request.
-
- To define custom logic for $lock, create a subclass and
- define a grant_$lock method.
-
- `unit` is the unit name making the request.
-
- `granted` is the set of units already granted the lock. It will
- never include `unit`. It may be empty.
-
- `queue` is the list of units waiting for the lock, ordered by time
- of request. It will always include `unit`, but `unit` is not
- necessarily first.
-
- Returns True if the lock should be granted to `unit`.
- '''
- return unit == queue[0] and not granted
-
-
-def _implicit_peer_relation_name():
- md = hookenv.metadata()
- assert 'peers' in md, 'No peer relations in metadata.yaml'
- return sorted(md['peers'].keys())[0]
-
-
-# A human readable, sortable UTC timestamp format.
-_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
-
-
-def _utcnow(): # pragma: no cover
- # This wrapper exists as mocking datetime methods is problematic.
- return datetime.utcnow()
-
-
-def _timestamp():
- return _utcnow().strftime(_timestamp_format)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py b/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/files.py b/charms/trusty/cassandra/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py b/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py b/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index 3912e6e..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1026 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import socket
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return _ensure_ip(unit_get('public-address'))
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return _ensure_ip(unit_get('private-address'))
-
-
-def _ensure_ip(addr):
- """If addr is a hostname, resolve it to an IP address"""
- if not addr:
- return None
- # We need to use socket.getaddrinfo for IPv6 support.
- info = socket.getaddrinfo(addr, None)
- if info is None:
- # Should never happen
- raise ValueError("Invalid result None from getaddinfo")
- try:
- return info[0][4][0]
- except IndexError:
- # Should never happen
- raise ValueError("Invalid result {!r} from getaddinfo".format(info))
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-@cached
-def storage_list(storage_name=None):
- """List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
- if storage_name:
- _args.append(storage_name)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except OSError as e:
- import errno
- if e.errno == errno.ENOENT:
- # storage-list does not exist
- return []
- raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- @wraps(f)
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The <class> and <id> provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The <class> and <id> provided must match a payload that has been previously
- registered with juju using payload-register. The <status> must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- <name> must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation of extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).strip()
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/host.py b/charms/trusty/cassandra/hooks/charmhelpers/core/host.py
deleted file mode 100644
index 481087b..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,695 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = True
- if service_running(service_name):
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_running(service_name)
- if not started:
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service_name):
- """Determine whether a system service is running"""
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- try:
- output = subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output or
- "up and running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
- primary_group=None, secondary_groups=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @returns result of lambda_f()
- """
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for action in actions:
- for service_name in services_list:
- service(action, service_name)
- return r
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also Chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # Classic, not KiB.
- raise NotImplementedError()
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py b/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index a783ad9..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py b/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 2423704..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py b/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py b/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py b/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index d2d8eaf..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py b/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 6dfe7ed..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes', '--force-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except NotImplementedError:
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index b8e0943..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'wb') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index cafd27f..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-from charmhelpers.core.host import mkdir
-
-
-if filter_installed_packages(['bzr']) != []:
- apt_install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
- else:
- return True
-
- def branch(self, source, dest):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if os.path.exists(dest):
- check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
- else:
- check_call(['bzr', 'branch', source, dest])
-
- def install(self, source, dest=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
-
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index 65ed531..0000000
--- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call, CalledProcessError
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-
-if filter_installed_packages(['git']) != []:
- apt_install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
- else:
- return True
-
- def clone(self, source, dest, branch="master", depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
- else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
- if depth:
- cmd.extend(['--depth', depth])
- check_call(cmd)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- try:
- self.clone(source, dest_dir, branch, depth)
- except CalledProcessError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/cassandra/hooks/cluster-relation-changed b/charms/trusty/cassandra/hooks/cluster-relation-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/cluster-relation-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/cluster-relation-departed b/charms/trusty/cassandra/hooks/cluster-relation-departed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/cluster-relation-departed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/config-changed b/charms/trusty/cassandra/hooks/config-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/config-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/coordinator.py b/charms/trusty/cassandra/hooks/coordinator.py
deleted file mode 100644
index c353671..0000000
--- a/charms/trusty/cassandra/hooks/coordinator.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from charmhelpers.coordinator import BaseCoordinator
-
-
-class CassandraCoordinator(BaseCoordinator):
- def default_grant(self, lock, unit, granted, queue):
- '''Grant locks to only one unit at a time, regardless of its name.
-
- This lets us keep separate locks like repair and restart,
- while ensuring the operations do not occur on different nodes
- at the same time.
- '''
- # Return True if this unit has already been granted a lock.
- if self.grants.get(unit):
- return True
-
- # Otherwise, return True if the unit is first in the queue.
- return queue[0] == unit and not granted
-
-
-coordinator = CassandraCoordinator()
diff --git a/charms/trusty/cassandra/hooks/data-relation-changed b/charms/trusty/cassandra/hooks/data-relation-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/data-relation-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/data-relation-departed b/charms/trusty/cassandra/hooks/data-relation-departed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/data-relation-departed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/database-admin-relation-changed b/charms/trusty/cassandra/hooks/database-admin-relation-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/database-admin-relation-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/database-relation-changed b/charms/trusty/cassandra/hooks/database-relation-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/database-relation-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/definitions.py b/charms/trusty/cassandra/hooks/definitions.py
deleted file mode 100644
index 24f9497..0000000
--- a/charms/trusty/cassandra/hooks/definitions.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import services
-
-import actions
-import helpers
-import relations
-
-
-def get_service_definitions():
- # This looks like it could be a module level global list, but
- # unfortunately that makes the module unimportable outside of a
- # hook context. The main culprit is RelationContext, which invokes
- # relation-get from its constructor. By wrapping the service
- # definition list in this function, we can defer constructing it
- # until we have constructed enough of a mock context and perform
- # basic tests.
- config = hookenv.config()
-
- return [
- # Prepare for the Cassandra service.
- dict(service='install',
- data_ready=[actions.set_proxy,
- actions.preinstall,
- actions.emit_meminfo,
- actions.revert_unchangeable_config,
- actions.store_unit_private_ip,
- actions.add_implicit_package_signing_keys,
- actions.configure_sources,
- actions.swapoff,
- actions.reset_sysctl,
- actions.reset_limits,
- actions.install_oracle_jre,
- actions.install_cassandra_packages,
- actions.emit_java_version,
- actions.ensure_cassandra_package_status],
- start=[], stop=[]),
-
- # Get Cassandra running.
- dict(service=helpers.get_cassandra_service(),
-
- # Open access to client and replication ports. Client
- # protocols require password authentication. Access to
- # the unauthenticated replication ports is protected via
- # ufw firewall rules. We do not open the JMX port, although
- # we could since it is similarly protected by ufw.
- ports=[config['rpc_port'], # Thrift clients
- config['native_transport_port'], # Native clients.
- config['storage_port'], # Plaintext replication
- config['ssl_storage_port']], # Encrypted replication.
-
- required_data=[relations.StorageRelation(),
- relations.PeerRelation()],
- provided_data=[relations.StorageRelation()],
- data_ready=[actions.configure_firewall,
- actions.update_etc_hosts,
- actions.maintain_seeds,
- actions.configure_cassandra_yaml,
- actions.configure_cassandra_env,
- actions.configure_cassandra_rackdc,
- actions.reset_all_io_schedulers,
- actions.maybe_restart,
- actions.request_unit_superuser,
- actions.reset_default_password],
- start=[services.open_ports],
- stop=[actions.stop_cassandra, services.close_ports]),
-
- # Actions that must be done while Cassandra is running.
- dict(service='post',
- required_data=[RequiresLiveNode()],
- data_ready=[actions.post_bootstrap,
- actions.create_unit_superusers,
- actions.reset_auth_keyspace_replication,
- actions.publish_database_relations,
- actions.publish_database_admin_relations,
- actions.install_maintenance_crontab,
- actions.nrpe_external_master_relation,
- actions.emit_cluster_info,
- actions.set_active],
- start=[], stop=[])]
-
-
-class RequiresLiveNode:
- def __bool__(self):
- is_live = self.is_live()
- hookenv.log('Requirement RequiresLiveNode: {}'.format(is_live),
- hookenv.DEBUG)
- return is_live
-
- def is_live(self):
- if helpers.is_decommissioned():
- hookenv.log('Node is decommissioned')
- return False
-
- if helpers.is_cassandra_running():
- hookenv.log('Cassandra is running')
- auth = hookenv.config()['authenticator']
- if auth == 'AllowAllAuthenticator':
- return True
- elif hookenv.local_unit() in helpers.get_unit_superusers():
- hookenv.log('Credentials created')
- return True
- else:
- hookenv.log('Credentials have not been created')
- return False
- else:
- hookenv.log('Cassandra is not running')
- return False
-
-
-def get_service_manager():
- return services.ServiceManager(get_service_definitions())
diff --git a/charms/trusty/cassandra/hooks/helpers.py b/charms/trusty/cassandra/hooks/helpers.py
deleted file mode 100644
index b86a6b1..0000000
--- a/charms/trusty/cassandra/hooks/helpers.py
+++ /dev/null
@@ -1,1084 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import configparser
-from contextlib import contextmanager
-from datetime import timedelta
-from distutils.version import LooseVersion
-import errno
-from functools import wraps
-import io
-import json
-import os.path
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-from textwrap import dedent
-import time
-
-import bcrypt
-from cassandra import ConsistencyLevel
-import cassandra.auth
-import cassandra.cluster
-import cassandra.query
-import yaml
-
-from charmhelpers.core import hookenv, host
-from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING
-from charmhelpers import fetch
-
-from coordinator import coordinator
-
-
-RESTART_TIMEOUT = 600
-
-
-def logged(func):
- @wraps(func)
- def wrapper(*args, **kw):
- hookenv.log("* Helper {}/{}".format(hookenv.hook_name(),
- func.__name__))
- return func(*args, **kw)
- return wrapper
-
-
-def backoff(what_for, max_pause=60):
- i = 0
- while True:
- yield True
- i += 1
- pause = min(max_pause, 2 ** i)
- time.sleep(pause)
- if pause > 10:
- hookenv.log('Recheck {} for {}'.format(i, what_for))
-
-
-# FOR CHARMHELPERS
-@contextmanager
-def autostart_disabled(services=None, _policy_rc='/usr/sbin/policy-rc.d'):
- '''Tell well behaved Debian packages to not start services when installed.
- '''
- script = ['#!/bin/sh']
- if services is not None:
- for service in services:
- script.append(
- 'if [ "$1" = "{}" ]; then exit 101; fi'.format(service))
- script.append('exit 0')
- else:
- script.append('exit 101') # By default, all daemons disabled.
- try:
- if os.path.exists(_policy_rc):
- shutil.move(_policy_rc, "{}-orig".format(_policy_rc))
- host.write_file(_policy_rc, '\n'.join(script).encode('ASCII'),
- perms=0o555)
- yield
- finally:
- os.unlink(_policy_rc)
- if os.path.exists("{}-orig".format(_policy_rc)):
- shutil.move("{}-orig".format(_policy_rc), _policy_rc)
-
-
-# FOR CHARMHELPERS
-@logged
-def install_packages(packages):
- packages = list(packages)
- if hookenv.config('extra_packages'):
- packages.extend(hookenv.config('extra_packages').split())
- packages = fetch.filter_installed_packages(packages)
- if packages:
- # The DSE packages are huge, so this might take some time.
- status_set('maintenance', 'Installing packages')
- with autostart_disabled(['cassandra']):
- fetch.apt_install(packages, fatal=True)
-
-
-# FOR CHARMHELPERS
-@logged
-def ensure_package_status(packages):
- config_dict = hookenv.config()
-
- package_status = config_dict['package_status']
-
- if package_status not in ['install', 'hold']:
- raise RuntimeError("package_status must be 'install' or 'hold', "
- "not {!r}".format(package_status))
-
- selections = []
- for package in packages:
- selections.append('{} {}\n'.format(package, package_status))
- dpkg = subprocess.Popen(['dpkg', '--set-selections'],
- stdin=subprocess.PIPE)
- dpkg.communicate(input=''.join(selections).encode('US-ASCII'))
-
-
-def get_seed_ips():
- '''Return the set of seed ip addresses.
-
- We use ip addresses rather than unit names, as we may need to use
- external seed ips at some point.
- '''
- return set((hookenv.leader_get('seeds') or '').split(','))
-
-
-def actual_seed_ips():
- '''Return the seeds currently in cassandra.yaml'''
- cassandra_yaml = read_cassandra_yaml()
- s = cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds']
- return set(s.split(','))
-
-
-def get_database_directory(config_path):
- '''Convert a database path from the service config to an absolute path.
-
- Entries in the config file may be absolute, relative to
- /var/lib/cassandra, or relative to the mountpoint.
- '''
- import relations
- storage = relations.StorageRelation()
- if storage.mountpoint:
- root = os.path.join(storage.mountpoint, 'cassandra')
- else:
- root = '/var/lib/cassandra'
- return os.path.join(root, config_path)
-
-
-def ensure_database_directory(config_path):
- '''Create the database directory if it doesn't exist, resetting
- ownership and other settings while we are at it.
-
- Returns the absolute path.
- '''
- absdir = get_database_directory(config_path)
-
- # Work around Bug #1427150 by ensuring components of the path are
- # created with the required permissions, if necessary.
- component = os.sep
- for p in absdir.split(os.sep)[1:-1]:
- component = os.path.join(component, p)
- if not os.path.exists(p):
- host.mkdir(component)
- assert component == os.path.split(absdir)[0]
- host.mkdir(absdir, owner='cassandra', group='cassandra', perms=0o750)
- return absdir
-
-
-def get_all_database_directories():
- config = hookenv.config()
- dirs = dict(
- data_file_directories=[get_database_directory(d)
- for d in (config['data_file_directories'] or
- 'data').split()],
- commitlog_directory=get_database_directory(
- config['commitlog_directory'] or 'commitlog'),
- saved_caches_directory=get_database_directory(
- config['saved_caches_directory'] or 'saved_caches'))
- if has_cassandra_version('3.0'):
- # Not yet configurable. Make configurable with Juju native storage.
- dirs['hints_directory'] = get_database_directory('hints')
- return dirs
-
-
-def mountpoint(path):
- '''Return the mountpoint that path exists on.'''
- path = os.path.realpath(path)
- while path != '/' and not os.path.ismount(path):
- path = os.path.dirname(path)
- return path
-
-
-# FOR CHARMHELPERS
-def is_lxc():
- '''Return True if we are running inside an LXC container.'''
- with open('/proc/1/cgroup', 'r') as f:
- return ':/lxc/' in f.readline()
-
-
-# FOR CHARMHELPERS
-def set_io_scheduler(io_scheduler, directory):
- '''Set the block device io scheduler.'''
-
- assert os.path.isdir(directory)
-
- # The block device regex may be a tad simplistic.
- block_regex = re.compile('\/dev\/([a-z]*)', re.IGNORECASE)
-
- output = subprocess.check_output(['df', directory],
- universal_newlines=True)
-
- if not is_lxc():
- hookenv.log("Setting block device of {} to IO scheduler {}"
- "".format(directory, io_scheduler))
- try:
- block_dev = re.findall(block_regex, output)[0]
- except IndexError:
- hookenv.log("Unable to locate block device of {} (in container?)"
- "".format(directory))
- return
- sys_file = os.path.join("/", "sys", "block", block_dev,
- "queue", "scheduler")
- try:
- host.write_file(sys_file, io_scheduler.encode('ascii'),
- perms=0o644)
- except OSError as e:
- if e.errno == errno.EACCES:
- hookenv.log("Got Permission Denied trying to set the "
- "IO scheduler at {}. We may be in an LXC. "
- "Exiting gracefully".format(sys_file),
- WARNING)
- elif e.errno == errno.ENOENT:
- hookenv.log("Got no such file or directory trying to "
- "set the IO scheduler at {}. It may be "
- "this is an LXC, the device name is as "
- "yet unknown to the charm, or LVM/RAID is "
- "hiding the underlying device name. "
- "Exiting gracefully".format(sys_file),
- WARNING)
- else:
- raise e
- else:
- # Make no change if we are in an LXC
- hookenv.log("In an LXC. Cannot set io scheduler {}"
- "".format(io_scheduler))
-
-
-# FOR CHARMHELPERS
-def recursive_chown(directory, owner="root", group="root"):
- '''Change ownership of all files and directories in 'directory'.
-
- Ownership of 'directory' is also reset.
- '''
- shutil.chown(directory, owner, group)
- for root, dirs, files in os.walk(directory):
- for dirname in dirs:
- shutil.chown(os.path.join(root, dirname), owner, group)
- for filename in files:
- shutil.chown(os.path.join(root, filename), owner, group)
-
-
-def maybe_backup(path):
- '''Copy a file to file.orig, if file.orig does not already exist.'''
- backup_path = path + '.orig'
- if not os.path.exists(backup_path):
- with open(path, 'rb') as f:
- host.write_file(backup_path, f.read(), perms=0o600)
-
-
-# FOR CHARMHELPERS
-def get_package_version(package):
- cache = fetch.apt_cache()
- if package not in cache:
- return None
- pkgver = cache[package].current_ver
- if pkgver is not None:
- return pkgver.ver_str
- return None
-
-
-def get_jre():
- # DataStax Enterprise requires the Oracle JRE.
- if get_cassandra_edition() == 'dse':
- return 'oracle'
-
- config = hookenv.config()
- jre = config['jre'].lower()
- if jre not in ('openjdk', 'oracle'):
- hookenv.log('Unknown JRE {!r} specified. Using OpenJDK'.format(jre),
- ERROR)
- jre = 'openjdk'
- return jre
-
-
-def get_cassandra_edition():
- config = hookenv.config()
- edition = config['edition'].lower()
- if edition not in ('community', 'dse'):
- hookenv.log('Unknown edition {!r}. Using community.'.format(edition),
- ERROR)
- edition = 'community'
- return edition
-
-
-def get_cassandra_service():
- '''Cassandra upstart service'''
- if get_cassandra_edition() == 'dse':
- return 'dse'
- return 'cassandra'
-
-
-def get_cassandra_version():
- if get_cassandra_edition() == 'dse':
- dse_ver = get_package_version('dse-full')
- if not dse_ver:
- return None
- elif LooseVersion(dse_ver) >= LooseVersion('5.0'):
- return '3.0'
- elif LooseVersion(dse_ver) >= LooseVersion('4.7'):
- return '2.1'
- else:
- return '2.0'
- return get_package_version('cassandra')
-
-
-def has_cassandra_version(minimum_ver):
- cassandra_version = get_cassandra_version()
- assert cassandra_version is not None, 'Cassandra package not yet installed'
- return LooseVersion(cassandra_version) >= LooseVersion(minimum_ver)
-
-
-def get_cassandra_config_dir():
- if get_cassandra_edition() == 'dse':
- return '/etc/dse/cassandra'
- else:
- return '/etc/cassandra'
-
-
-def get_cassandra_yaml_file():
- return os.path.join(get_cassandra_config_dir(), "cassandra.yaml")
-
-
-def get_cassandra_env_file():
- return os.path.join(get_cassandra_config_dir(), "cassandra-env.sh")
-
-
-def get_cassandra_rackdc_file():
- return os.path.join(get_cassandra_config_dir(),
- "cassandra-rackdc.properties")
-
-
-def get_cassandra_pid_file():
- edition = get_cassandra_edition()
- if edition == 'dse':
- pid_file = "/var/run/dse/dse.pid"
- else:
- pid_file = "/var/run/cassandra/cassandra.pid"
- return pid_file
-
-
-def get_cassandra_packages():
- edition = get_cassandra_edition()
- if edition == 'dse':
- packages = set(['dse-full'])
- else:
- packages = set(['cassandra']) # 'cassandra-tools'
-
- packages.add('ntp')
- packages.add('run-one')
- packages.add('netcat')
-
- jre = get_jre()
- if jre == 'oracle':
- # We can't use a packaged version of the Oracle JRE, as we
- # are not allowed to bypass Oracle's click through license
- # agreement.
- pass
- else:
- # NB. OpenJDK 8 not available in trusty. This needs to come
- # from a PPA or some other configured source.
- packages.add('openjdk-8-jre-headless')
-
- return packages
-
-
-@logged
-def stop_cassandra():
- if is_cassandra_running():
- hookenv.log('Shutting down Cassandra')
- host.service_stop(get_cassandra_service())
- if is_cassandra_running():
- hookenv.status_set('blocked', 'Cassandra failed to shut down')
- raise SystemExit(0)
-
-
-@logged
-def start_cassandra():
- if is_cassandra_running():
- return
-
- actual_seeds = sorted(actual_seed_ips())
- assert actual_seeds, 'Attempting to start cassandra with empty seed list'
- hookenv.config()['configured_seeds'] = actual_seeds
-
- if is_bootstrapped():
- status_set('maintenance',
- 'Starting Cassandra with seeds {!r}'
- .format(','.join(actual_seeds)))
- else:
- status_set('maintenance',
- 'Bootstrapping with seeds {}'
- .format(','.join(actual_seeds)))
-
- host.service_start(get_cassandra_service())
-
- # Wait for Cassandra to actually start, or abort.
- timeout = time.time() + RESTART_TIMEOUT
- while time.time() < timeout:
- if is_cassandra_running():
- return
- time.sleep(1)
- status_set('blocked', 'Cassandra failed to start')
- raise SystemExit(0)
-
-
-@logged
-def reconfigure_and_restart_cassandra(overrides={}):
- stop_cassandra()
- configure_cassandra_yaml(overrides)
- start_cassandra()
-
-
-@logged
-def remount_cassandra():
- '''If a new mountpoint is ready, migrate data across to it.'''
- assert not is_cassandra_running() # Guard against data loss.
- import relations
- storage = relations.StorageRelation()
- if storage.needs_remount():
- status_set('maintenance', 'Migrating data to new mountpoint')
- hookenv.config()['bootstrapped_into_cluster'] = False
- if storage.mountpoint is None:
- hookenv.log('External storage AND DATA gone. '
- 'Reverting to local storage. '
- 'In danger of resurrecting old data. ',
- WARNING)
- else:
- storage.migrate('/var/lib/cassandra', 'cassandra')
- root = os.path.join(storage.mountpoint, 'cassandra')
- os.chmod(root, 0o750)
-
-
-@logged
-def ensure_database_directories():
- '''Ensure that directories Cassandra expects to store its data in exist.'''
- # Guard against changing perms on a running db. Although probably
- # harmless, it causes shutil.chown() to fail.
- assert not is_cassandra_running()
- db_dirs = get_all_database_directories()
- ensure_database_directory(db_dirs['commitlog_directory'])
- ensure_database_directory(db_dirs['saved_caches_directory'])
- if 'hints_directory' in db_dirs:
- ensure_database_directory(db_dirs['hints_directory'])
- for db_dir in db_dirs['data_file_directories']:
- ensure_database_directory(db_dir)
-
-
-CONNECT_TIMEOUT = 10
-
-
-@contextmanager
-def connect(username=None, password=None, timeout=CONNECT_TIMEOUT,
- auth_timeout=CONNECT_TIMEOUT):
- # We pull the currently configured listen address and port from the
- # yaml, rather than the service configuration, as it may have been
- # overridden.
- cassandra_yaml = read_cassandra_yaml()
- address = cassandra_yaml['rpc_address']
- if address == '0.0.0.0':
- address = 'localhost'
- port = cassandra_yaml['native_transport_port']
-
- if username is None or password is None:
- username, password = superuser_credentials()
-
- auth = hookenv.config()['authenticator']
- if auth == 'AllowAllAuthenticator':
- auth_provider = None
- else:
- auth_provider = cassandra.auth.PlainTextAuthProvider(username=username,
- password=password)
-
- # Although we specify a reconnection_policy, it does not apply to
- # the initial connection so we retry in a loop.
- start = time.time()
- until = start + timeout
- auth_until = start + auth_timeout
- while True:
- cluster = cassandra.cluster.Cluster([address], port=port,
- auth_provider=auth_provider)
- try:
- session = cluster.connect()
- session.default_timeout = timeout
- break
- except cassandra.cluster.NoHostAvailable as x:
- cluster.shutdown()
- now = time.time()
- # If every node failed auth, reraise one of the
- # AuthenticationFailed exceptions. Unwrapping the exception
- # means call sites don't have to sniff the exception bundle.
- # We don't retry on auth fails; this method should not be
- # called if the system_auth data is inconsistent.
- auth_fails = [af for af in x.errors.values()
- if isinstance(af, cassandra.AuthenticationFailed)]
- if auth_fails:
- if now > auth_until:
- raise auth_fails[0]
- if now > until:
- raise
- time.sleep(1)
- try:
- yield session
- finally:
- cluster.shutdown()
-
-
-QUERY_TIMEOUT = 60
-
-
-def query(session, statement, consistency_level, args=None):
- q = cassandra.query.SimpleStatement(statement,
- consistency_level=consistency_level)
-
- until = time.time() + QUERY_TIMEOUT
- for _ in backoff('query to execute'):
- try:
- return session.execute(q, args)
- except Exception:
- if time.time() > until:
- raise
-
-
-def encrypt_password(password):
- return bcrypt.hashpw(password, bcrypt.gensalt())
-
-
-@logged
-def ensure_user(session, username, encrypted_password, superuser=False):
- '''Create the DB user if it doesn't already exist & reset the password.'''
- auth = hookenv.config()['authenticator']
- if auth == 'AllowAllAuthenticator':
- return # No authentication means we cannot create users
-
- if superuser:
- hookenv.log('Creating SUPERUSER {}'.format(username))
- else:
- hookenv.log('Creating user {}'.format(username))
- if has_cassandra_version('2.2'):
- query(session,
- 'INSERT INTO system_auth.roles '
- '(role, can_login, is_superuser, salted_hash) '
- 'VALUES (%s, TRUE, %s, %s)',
- ConsistencyLevel.ALL,
- (username, superuser, encrypted_password))
- else:
- query(session,
- 'INSERT INTO system_auth.users (name, super) VALUES (%s, %s)',
- ConsistencyLevel.ALL, (username, superuser))
- query(session,
- 'INSERT INTO system_auth.credentials (username, salted_hash) '
- 'VALUES (%s, %s)',
- ConsistencyLevel.ALL, (username, encrypted_password))
-
-
-@logged
-def create_unit_superuser_hard():
- '''Create or recreate the unit's superuser account.
-
- This method is used when there are no known superuser credentials
- to use. We restart the node using the AllowAllAuthenticator and
- insert our credentials directly into the system_auth keyspace.
- '''
- username, password = superuser_credentials()
- pwhash = encrypt_password(password)
- hookenv.log('Creating unit superuser {}'.format(username))
-
- # Restart cassandra without authentication & listening on localhost.
- reconfigure_and_restart_cassandra(
- dict(authenticator='AllowAllAuthenticator', rpc_address='localhost'))
- for _ in backoff('superuser creation'):
- try:
- with connect() as session:
- ensure_user(session, username, pwhash, superuser=True)
- break
- except Exception as x:
- print(str(x))
-
- # Restart Cassandra with regular config.
- nodetool('flush') # Ensure our backdoor updates are flushed.
- reconfigure_and_restart_cassandra()
-
-
-def get_cqlshrc_path():
- return os.path.expanduser('~root/.cassandra/cqlshrc')
-
-
-def superuser_username():
- return 'juju_{}'.format(re.subn(r'\W', '_', hookenv.local_unit())[0])
-
-
-def superuser_credentials():
- '''Return (username, password) to connect to the Cassandra superuser.
-
- The credentials are persisted in the root user's cqlshrc file,
- making them easily accessible to the command line tools.
- '''
- cqlshrc_path = get_cqlshrc_path()
- cqlshrc = configparser.ConfigParser(interpolation=None)
- cqlshrc.read([cqlshrc_path])
-
- username = superuser_username()
-
- try:
- section = cqlshrc['authentication']
- # If there happened to be an existing cqlshrc file, it might
- # contain invalid credentials. Ignore them.
- if section['username'] == username:
- return section['username'], section['password']
- except KeyError:
- hookenv.log('Generating superuser credentials into {}'.format(
- cqlshrc_path))
-
- config = hookenv.config()
-
- password = host.pwgen()
-
- hookenv.log('Generated username {}'.format(username))
-
- # We set items separately, rather than together, so that we have a
- # defined order for the ConfigParser to preserve and the tests to
- # rely on.
- cqlshrc.setdefault('authentication', {})
- cqlshrc['authentication']['username'] = username
- cqlshrc['authentication']['password'] = password
- cqlshrc.setdefault('connection', {})
- cqlshrc['connection']['hostname'] = hookenv.unit_public_ip()
- if get_cassandra_version().startswith('2.0'):
- cqlshrc['connection']['port'] = str(config['rpc_port'])
- else:
- cqlshrc['connection']['port'] = str(config['native_transport_port'])
-
- ini = io.StringIO()
- cqlshrc.write(ini)
- host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700)
- host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'), perms=0o400)
-
- return username, password
-
-
-def emit(*args, **kw):
- # Just like print, but with plumbing and mocked out in the test suite.
- print(*args, **kw)
- sys.stdout.flush()
-
-
-def nodetool(*cmd, timeout=120):
- cmd = ['nodetool'] + [str(i) for i in cmd]
- i = 0
- until = time.time() + timeout
- for _ in backoff('nodetool to work'):
- i += 1
- try:
- if timeout is not None:
- timeout = max(0, until - time.time())
- raw = subprocess.check_output(cmd, universal_newlines=True,
- timeout=timeout,
- stderr=subprocess.STDOUT)
-
- # Work around CASSANDRA-8776.
- if 'status' in cmd and 'Error:' in raw:
- hookenv.log('Error detected but nodetool returned success.',
- WARNING)
- raise subprocess.CalledProcessError(99, cmd, raw)
-
- hookenv.log('{} succeeded'.format(' '.join(cmd)), DEBUG)
- out = raw.expandtabs()
- emit(out)
- return out
-
- except subprocess.CalledProcessError as x:
- if i > 1:
- emit(x.output.expandtabs()) # Expand tabs for juju debug-log.
- if not is_cassandra_running():
- status_set('blocked',
- 'Cassandra has unexpectedly shutdown')
- raise SystemExit(0)
- if time.time() >= until:
- raise
-
-
-def num_nodes():
- return len(get_bootstrapped_ips())
-
-
-def read_cassandra_yaml():
- cassandra_yaml_path = get_cassandra_yaml_file()
- with open(cassandra_yaml_path, 'rb') as f:
- return yaml.safe_load(f)
-
-
-@logged
-def write_cassandra_yaml(cassandra_yaml):
- cassandra_yaml_path = get_cassandra_yaml_file()
- host.write_file(cassandra_yaml_path,
- yaml.safe_dump(cassandra_yaml).encode('UTF-8'))
-
-
-def configure_cassandra_yaml(overrides={}, seeds=None):
- cassandra_yaml_path = get_cassandra_yaml_file()
- config = hookenv.config()
-
- maybe_backup(cassandra_yaml_path) # Its comments may be useful.
-
- cassandra_yaml = read_cassandra_yaml()
-
- # Most options just copy from config.yaml keys with the same name.
- # Using the same name is preferred to match the actual Cassandra
- # documentation.
- simple_config_keys = ['cluster_name', 'num_tokens',
- 'partitioner', 'authorizer', 'authenticator',
- 'compaction_throughput_mb_per_sec',
- 'stream_throughput_outbound_megabits_per_sec',
- 'tombstone_warn_threshold',
- 'tombstone_failure_threshold',
- 'native_transport_port', 'rpc_port',
- 'storage_port', 'ssl_storage_port']
- cassandra_yaml.update((k, config[k]) for k in simple_config_keys)
-
- seeds = ','.join(seeds or get_seed_ips()) # Don't include whitespace!
- hookenv.log('Configuring seeds as {!r}'.format(seeds), DEBUG)
- cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds'] = seeds
-
- cassandra_yaml['listen_address'] = hookenv.unit_private_ip()
- cassandra_yaml['rpc_address'] = '0.0.0.0'
- if not get_cassandra_version().startswith('2.0'):
- cassandra_yaml['broadcast_rpc_address'] = hookenv.unit_public_ip()
-
- dirs = get_all_database_directories()
- cassandra_yaml.update(dirs)
-
- # GossipingPropertyFileSnitch is the only snitch recommended for
- # production. It we allow others, we need to consider how to deal
- # with the system_auth keyspace replication settings.
- cassandra_yaml['endpoint_snitch'] = 'GossipingPropertyFileSnitch'
-
- # Per Bug #1523546 and CASSANDRA-9319, Thrift is disabled by default in
- # Cassandra 2.2. Ensure it is enabled if rpc_port is non-zero.
- if int(config['rpc_port']) > 0:
- cassandra_yaml['start_rpc'] = True
-
- cassandra_yaml.update(overrides)
-
- write_cassandra_yaml(cassandra_yaml)
-
-
-def get_pid_from_file(pid_file):
- try:
- with open(pid_file, 'r') as f:
- pid = int(f.read().strip().split()[0])
- if pid <= 1:
- raise ValueError('Illegal pid {}'.format(pid))
- return pid
- except (ValueError, IndexError) as e:
- hookenv.log("Invalid PID in {}.".format(pid_file))
- raise ValueError(e)
-
-
-def is_cassandra_running():
- pid_file = get_cassandra_pid_file()
-
- try:
- for _ in backoff('Cassandra to respond'):
- # We reload the pid every time, in case it has gone away.
- # If it goes away, a FileNotFound exception is raised.
- pid = get_pid_from_file(pid_file)
-
- # This does not kill the process but checks for its
- # existence. It raises an ProcessLookupError if the process
- # is not running.
- os.kill(pid, 0)
-
- if subprocess.call(["nodetool", "status"],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL) == 0:
- hookenv.log(
- "Cassandra PID {} is running and responding".format(pid))
- return True
- except FileNotFoundError:
- hookenv.log("Cassandra is not running. PID file does not exist.")
- return False
- except ProcessLookupError:
- if os.path.exists(pid_file):
- # File disappeared between reading the PID and checking if
- # the PID is running.
- hookenv.log("Cassandra is not running, but pid file exists.",
- WARNING)
- else:
- hookenv.log("Cassandra is not running. PID file does not exist.")
- return False
-
-
-def get_auth_keyspace_replication(session):
- if has_cassandra_version('3.0'):
- statement = dedent('''\
- SELECT replication FROM system_schema.keyspaces
- WHERE keyspace_name='system_auth'
- ''')
- r = query(session, statement, ConsistencyLevel.QUORUM)
- return dict(r[0][0])
- else:
- statement = dedent('''\
- SELECT strategy_options FROM system.schema_keyspaces
- WHERE keyspace_name='system_auth'
- ''')
- r = query(session, statement, ConsistencyLevel.QUORUM)
- return json.loads(r[0][0])
-
-
-@logged
-def set_auth_keyspace_replication(session, settings):
- # Live operation, so keep status the same.
- status_set(hookenv.status_get()[0],
- 'Updating system_auth rf to {!r}'.format(settings))
- statement = 'ALTER KEYSPACE system_auth WITH REPLICATION = %s'
- query(session, statement, ConsistencyLevel.ALL, (settings,))
-
-
-@logged
-def repair_auth_keyspace():
- # Repair takes a long time, and may need to be retried due to 'snapshot
- # creation' errors, but should certainly complete within an hour since
- # the keyspace is tiny.
- status_set(hookenv.status_get()[0],
- 'Repairing system_auth keyspace')
- nodetool('repair', 'system_auth', timeout=3600)
-
-
-def is_bootstrapped(unit=None):
- '''Return True if the node has already bootstrapped into the cluster.'''
- if unit is None or unit == hookenv.local_unit():
- return hookenv.config().get('bootstrapped', False)
- elif coordinator.relid:
- return bool(hookenv.relation_get(rid=coordinator.relid,
- unit=unit).get('bootstrapped'))
- else:
- return False
-
-
-def set_bootstrapped():
- # We need to store this flag in two locations. The peer relation,
- # so peers can see it, and local state, for when we haven't joined
- # the peer relation yet. actions.publish_bootstrapped_flag()
- # calls this method again when necessary to ensure that state is
- # propagated # if/when the peer relation is joined.
- config = hookenv.config()
- config['bootstrapped'] = True
- if coordinator.relid is not None:
- hookenv.relation_set(coordinator.relid, bootstrapped="1")
- if config.changed('bootstrapped'):
- hookenv.log('Bootstrapped')
- else:
- hookenv.log('Already bootstrapped')
-
-
-def get_bootstrapped():
- units = [hookenv.local_unit()]
- if coordinator.relid is not None:
- units.extend(hookenv.related_units(coordinator.relid))
- return set([unit for unit in units if is_bootstrapped(unit)])
-
-
-def get_bootstrapped_ips():
- return set([unit_to_ip(unit) for unit in get_bootstrapped()])
-
-
-def unit_to_ip(unit):
- if unit is None or unit == hookenv.local_unit():
- return hookenv.unit_private_ip()
- elif coordinator.relid:
- pa = hookenv.relation_get(rid=coordinator.relid,
- unit=unit).get('private-address')
- return hookenv._ensure_ip(pa)
- else:
- return None
-
-
-def get_node_status():
- '''Return the Cassandra node status.
-
- May be NORMAL, JOINING, DECOMMISSIONED etc., or None if we can't tell.
- '''
- if not is_cassandra_running():
- return None
- raw = nodetool('netstats')
- m = re.search(r'(?m)^Mode:\s+(\w+)$', raw)
- if m is None:
- return None
- return m.group(1).upper()
-
-
-def is_decommissioned():
- status = get_node_status()
- if status in ('DECOMMISSIONED', 'LEAVING'):
- hookenv.log('This node is {}'.format(status), WARNING)
- return True
- return False
-
-
-@logged
-def emit_describe_cluster():
- '''Run nodetool describecluster for the logs.'''
- nodetool('describecluster') # Implicit emit
-
-
-@logged
-def emit_status():
- '''Run 'nodetool status' for the logs.'''
- nodetool('status') # Implicit emit
-
-
-@logged
-def emit_netstats():
- '''Run 'nodetool netstats' for the logs.'''
- nodetool('netstats') # Implicit emit
-
-
-def emit_cluster_info():
- emit_describe_cluster()
- emit_status()
- emit_netstats()
-
-
-# FOR CHARMHELPERS (and think of a better name)
-def week_spread(unit_num):
- '''Pick a time for a unit's weekly job.
-
- Jobs are spread out evenly throughout the week as best we can.
- The chosen time only depends on the unit number, and does not change
- if other units are added and removed; while the chosen time will not
- be perfect, we don't have to worry about skipping a weekly job if
- units are added or removed at the wrong moment.
-
- Returns (dow, hour, minute) suitable for cron.
- '''
- def vdc(n, base=2):
- '''Van der Corpet sequence. 0, 0.5, 0.25, 0.75, 0.125, 0.625, ...
-
- http://rosettacode.org/wiki/Van_der_Corput_sequence#Python
- '''
- vdc, denom = 0, 1
- while n:
- denom *= base
- n, remainder = divmod(n, base)
- vdc += remainder / denom
- return vdc
- # We could use the vdc() function to distribute jobs evenly throughout
- # the week, so unit 0==0, unit 1==3.5days, unit 2==1.75 etc. But
- # plain modulo for the day of week is easier for humans and what
- # you expect for 7 units or less.
- sched_dow = unit_num % 7
- # We spread time of day so each batch of 7 units gets the same time,
- # as far spread out from the other batches of 7 units as possible.
- minutes_in_day = 24 * 60
- sched = timedelta(minutes=int(minutes_in_day * vdc(unit_num // 7)))
- sched_hour = sched.seconds // (60 * 60)
- sched_minute = sched.seconds // 60 - sched_hour * 60
- return (sched_dow, sched_hour, sched_minute)
-
-
-# FOR CHARMHELPERS. This should be a constant in nrpe.py
-def local_plugins_dir():
- return '/usr/local/lib/nagios/plugins'
-
-
-def leader_ping():
- '''Make a change in the leader settings, waking the non-leaders.'''
- assert hookenv.is_leader()
- last = int(hookenv.leader_get('ping') or 0)
- hookenv.leader_set(ping=str(last + 1))
-
-
-def get_unit_superusers():
- '''Return the set of units that have had their superuser accounts created.
- '''
- raw = hookenv.leader_get('superusers')
- return set(json.loads(raw or '[]'))
-
-
-def set_unit_superusers(superusers):
- hookenv.leader_set(superusers=json.dumps(sorted(superusers)))
-
-
-def status_set(state, message):
- '''Set the unit status and log a message.'''
- hookenv.status_set(state, message)
- hookenv.log('{} unit state: {}'.format(state, message))
-
-
-def service_status_set(state, message):
- '''Set the service status and log a message.'''
- subprocess.check_call(['status-set', '--service', state, message])
- hookenv.log('{} service state: {}'.format(state, message))
-
-
-def get_service_name(relid):
- '''Return the service name for the other end of relid.'''
- units = hookenv.related_units(relid)
- if units:
- return units[0].split('/', 1)[0]
- else:
- return None
-
-
-def peer_relid():
- return coordinator.relid
-
-
-@logged
-def set_active():
- '''Set happy state'''
- if hookenv.unit_private_ip() in get_seed_ips():
- msg = 'Live seed'
- else:
- msg = 'Live node'
- status_set('active', msg)
-
- if hookenv.is_leader():
- n = num_nodes()
- if n == 1:
- n = 'Single'
- service_status_set('active', '{} node cluster'.format(n))
-
-
-def update_hosts_file(hosts_file, hosts_map):
- """Older versions of Cassandra need own hostname resolution."""
- with open(hosts_file, 'r') as hosts:
- lines = hosts.readlines()
-
- newlines = []
- for ip, hostname in hosts_map.items():
- if not ip or not hostname:
- continue
-
- keepers = []
- for line in lines:
- _line = line.split()
- if len(_line) < 2 or not (_line[0] == ip or hostname in _line[1:]):
- keepers.append(line)
- else:
- hookenv.log('Marking line {!r} for update or removal'
- ''.format(line.strip()), level=DEBUG)
-
- lines = keepers
- newlines.append('{} {}\n'.format(ip, hostname))
-
- lines += newlines
-
- with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
- with open(tmpfile.name, 'w') as hosts:
- for line in lines:
- hosts.write(line)
-
- os.rename(tmpfile.name, hosts_file)
- os.chmod(hosts_file, 0o644)
diff --git a/charms/trusty/cassandra/hooks/hooks.py b/charms/trusty/cassandra/hooks/hooks.py
deleted file mode 100644
index ef38c20..0000000
--- a/charms/trusty/cassandra/hooks/hooks.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from charmhelpers import fetch
-from charmhelpers.core import hookenv
-
-
-def set_proxy():
- import os
- config = hookenv.config()
- if config['http_proxy']:
- os.environ['ftp_proxy'] = config['http_proxy']
- os.environ['http_proxy'] = config['http_proxy']
- os.environ['https_proxy'] = config['http_proxy']
-
-
-def bootstrap():
- try:
- import bcrypt # NOQA: flake8
- import cassandra # NOQA: flake8
- except ImportError:
- packages = ['python3-bcrypt', 'python3-cassandra']
- set_proxy()
- fetch.configure_sources(update=True)
- fetch.apt_install(packages,fatal=True)
- import bcrypt # NOQA: flake8
- import cassandra # NOQA: flake8
-
-
-def default_hook():
- if not hookenv.has_juju_version('1.24'):
- hookenv.status_set('blocked', 'Requires Juju 1.24 or higher')
- # Error state, since we don't have 1.24 to give a nice blocked state.
- raise SystemExit(1)
-
- # These need to be imported after bootstrap() or required Python
- # packages may not have been installed.
- import definitions
-
- # Only useful for debugging, or perhaps have this enabled with a config
- # option?
- # from loglog import loglog
- # loglog('/var/log/cassandra/system.log', prefix='C*: ')
-
- hookenv.log('*** {} Hook Start'.format(hookenv.hook_name()))
- sm = definitions.get_service_manager()
- sm.manage()
- hookenv.log('*** {} Hook Done'.format(hookenv.hook_name()))
diff --git a/charms/trusty/cassandra/hooks/install b/charms/trusty/cassandra/hooks/install
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/install
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/leader-elected b/charms/trusty/cassandra/hooks/leader-elected
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/leader-elected
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/leader-settings-changed b/charms/trusty/cassandra/hooks/leader-settings-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/leader-settings-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/loglog.py b/charms/trusty/cassandra/hooks/loglog.py
deleted file mode 100644
index 33f3af8..0000000
--- a/charms/trusty/cassandra/hooks/loglog.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import atexit
-import subprocess
-import threading
-import time
-
-from charmhelpers.core import hookenv
-
-
-def loglog(filename, prefix='', level=hookenv.DEBUG):
- '''Mirror an arbitrary log file to the Juju hook log in the background.'''
- tailproc = subprocess.Popen(['tail', '-F', filename],
- stdout=subprocess.PIPE,
- universal_newlines=True)
- atexit.register(tailproc.terminate)
-
- def loglog_t(tailproc=tailproc):
- while True:
- line = tailproc.stdout.readline()
- if line:
- hookenv.log('{}{}'.format(prefix, line), level)
- else:
- time.sleep(0.1)
- continue
-
- t = threading.Thread(target=loglog_t, daemon=True)
- t.start()
diff --git a/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed b/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/relations.py b/charms/trusty/cassandra/hooks/relations.py
deleted file mode 100644
index f7870a1..0000000
--- a/charms/trusty/cassandra/hooks/relations.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os.path
-
-import yaml
-
-from charmhelpers.core import hookenv, host
-from charmhelpers.core.hookenv import log, WARNING
-from charmhelpers.core.services.helpers import RelationContext
-
-from coordinator import coordinator
-
-
-class PeerRelation(RelationContext):
- interface = 'cassandra-cluster'
- name = 'cluster'
-
- def is_ready(self):
- # All units except the leader need to wait until the peer
- # relation is available.
- if coordinator.relid is not None or hookenv.is_leader():
- return True
- return False
-
-
-# FOR CHARMHELPERS (if we can integrate Juju 1.24 storage too)
-class StorageRelation(RelationContext):
- '''Wait for the block storage mount to become available.
-
- Charms using this should add a 'wait_for_storage_broker' boolean
- configuration option in their config.yaml file. This is necessary
- to avoid potential data loss race conditions, because otherwise a
- unit will be started up using local disk before it becomes aware
- that it should be using external storage.
-
- 'relname' is the relation name.
-
- 'mountpount' is the mountpoint. Use the default if you have a single
- block storage broker relation. The default is calculated to avoid
- configs using the unit name (/srv/${service}_${unitnumber}).
- '''
- interface = 'block-storage'
- mountpoint = None
-
- def __init__(self, name=None, mountpoint=None):
- if name is None:
- name = self._get_relation_name()
- super(StorageRelation, self).__init__(name)
-
- if mountpoint is None:
- mountpoint = os.path.join('/srv/',
- hookenv.local_unit().replace('/', '_'))
- self._requested_mountpoint = mountpoint
-
- if len(self.get('data', [])) == 0:
- self.mountpoint = None
- elif mountpoint == self['data'][0].get('mountpoint', None):
- self.mountpoint = mountpoint
- else:
- self.mountpoint = None
-
- def _get_relation_name(self):
- with open(os.path.join(hookenv.charm_dir(),
- 'metadata.yaml'), 'r') as mdf:
- md = yaml.safe_load(mdf)
- for section in ['requires', 'provides']:
- for relname in md.get(section, {}).keys():
- if md[section][relname]['interface'] == 'block-storage':
- return relname
- raise LookupError('No block-storage relation defined')
-
- def is_ready(self):
- if hookenv.config('wait_for_storage_broker'):
- if self.mountpoint:
- log("External storage mounted at {}".format(self.mountpoint))
- return True
- else:
- log("Waiting for block storage broker to mount {}".format(
- self._requested_mountpoint), WARNING)
- return False
- return True
-
- def provide_data(self, remote_service, service_ready):
- hookenv.log('Requesting mountpoint {} from {}'
- .format(self._requested_mountpoint, remote_service))
- return dict(mountpoint=self._requested_mountpoint)
-
- def needs_remount(self):
- config = hookenv.config()
- return config.get('live_mountpoint') != self.mountpoint
-
- def migrate(self, src_dir, subdir):
- assert self.needs_remount()
- assert subdir, 'Can only migrate to a subdirectory on a mount'
-
- config = hookenv.config()
- config['live_mountpoint'] = self.mountpoint
-
- if self.mountpoint is None:
- hookenv.log('External storage AND DATA gone.'
- 'Reverting to original local storage', WARNING)
- return
-
- dst_dir = os.path.join(self.mountpoint, subdir)
- if os.path.exists(dst_dir):
- hookenv.log('{} already exists. Not migrating data.'.format(
- dst_dir))
- return
-
- # We are migrating the contents of src_dir, so we want a
- # trailing slash to ensure rsync's behavior.
- if not src_dir.endswith('/'):
- src_dir += '/'
-
- # We don't migrate data directly into the new destination,
- # which allows us to detect a failed migration and recover.
- tmp_dst_dir = dst_dir + '.migrating'
- hookenv.log('Migrating data from {} to {}'.format(
- src_dir, tmp_dst_dir))
- host.rsync(src_dir, tmp_dst_dir, flags='-av')
-
- hookenv.log('Moving {} to {}'.format(tmp_dst_dir, dst_dir))
- os.rename(tmp_dst_dir, dst_dir)
-
- assert not self.needs_remount()
diff --git a/charms/trusty/cassandra/hooks/stop b/charms/trusty/cassandra/hooks/stop
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/stop
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/hooks/upgrade-charm b/charms/trusty/cassandra/hooks/upgrade-charm
deleted file mode 100755
index 9128cab..0000000
--- a/charms/trusty/cassandra/hooks/upgrade-charm
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/python3
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import hooks
-if __name__ == '__main__':
- hooks.bootstrap()
- hooks.default_hook()
diff --git a/charms/trusty/cassandra/icon.svg b/charms/trusty/cassandra/icon.svg
deleted file mode 100644
index 7615021..0000000
--- a/charms/trusty/cassandra/icon.svg
+++ /dev/null
@@ -1,650 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.48+devel r12304"
- sodipodi:docname="cassandra01.svg">
- <defs
- id="defs6519">
- <linearGradient
- inkscape:collect="always"
- id="linearGradient1105">
- <stop
- style="stop-color:#4ba6bd;stop-opacity:1"
- offset="0"
- id="stop1107" />
- <stop
- style="stop-color:#94ccda;stop-opacity:1"
- offset="1"
- id="stop1109" />
- </linearGradient>
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#d3effc;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#e8f3f8;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- <clipPath
- id="clipPath2832">
- <path
- id="path2834"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2844">
- <path
- id="path2846"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2852">
- <path
- id="path2854"
- d="m 96.0078,715.93 88.2902,0 0,-62.176 -88.2902,0 0,62.176 z" />
- </clipPath>
- <clipPath
- id="clipPath2868">
- <path
- id="path2870"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2880">
- <path
- id="path2882"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2908">
- <path
- id="path2910"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2936">
- <path
- id="path2938"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2944">
- <path
- id="path2946"
- d="m 121.202,708.378 45.899,0 0,-45.859 -45.899,0 0,45.859 z" />
- </clipPath>
- <clipPath
- id="clipPath2960">
- <path
- id="path2962"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2968">
- <path
- id="path2970"
- d="m 40.4033,726.188 212.4017,0 0,-61.818 -212.4017,0 0,61.818 z" />
- </clipPath>
- <clipPath
- id="clipPath2988">
- <path
- id="path2990"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <clipPath
- id="clipPath2996">
- <path
- id="path2998"
- d="m 39.5195,688.644 199.3805,0 0,-73.818 -199.3805,0 0,73.818 z" />
- </clipPath>
- <clipPath
- id="clipPath3016">
- <path
- id="path3018"
- d="M 0,792 612,792 612,0 0,0 0,792 z" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient1105"
- id="linearGradient1111"
- x1="-220"
- y1="731.29077"
- x2="-220"
- y2="635.29077"
- gradientUnits="userSpaceOnUse" />
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="3.259629"
- inkscape:cx="51.812246"
- inkscape:cy="27.005007"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1029"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:url(#linearGradient1111);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- <g
- id="g1012"
- transform="matrix(0.31314985,0,0,0.31314985,-263.71323,659.42827)">
- <g
- style="display:inline"
- id="layer2-4"
- transform="translate(-62.668647,-74.06425)">
- <g
- id="g4555"
- transform="matrix(1.25,0,0,-1.25,19.117647,990)">
- <g
- id="g2828">
- <g
- id="g2830"
- clip-path="url(#clipPath2832)">
- <g
- id="g2836"
- transform="translate(210.8784,690.4834)">
- <path
- style="fill:#bbe6fb;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2838"
- d="m 0,0 c 1.584,-18.452 -27.455,-36.014 -64.859,-39.223 -37.404,-3.209 -69.01,9.151 -70.592,27.602 -1.584,18.455 27.455,36.016 64.859,39.225 C -33.188,30.812 -1.582,18.455 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- <g
- id="g2840">
- <g
- id="g2842"
- clip-path="url(#clipPath2844)">
- <g
- id="g2848">
- <g
- id="g2850" />
- <g
- id="g2856">
- <g
- style="opacity:0.35000604"
- id="g2858"
- clip-path="url(#clipPath2852)">
- <g
- id="g2860"
- transform="translate(141.3843,715.9233)">
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2862"
- d="m 0,0 c -14.268,0.232 -30.964,-5.433 -43.387,-10.738 -1.293,-3.726 -1.989,-7.689 -1.989,-11.797 0,-21.888 19.764,-39.634 44.145,-39.634 24.381,0 44.145,17.746 44.145,39.634 0,6.927 -1.984,13.435 -5.463,19.101 C 27.512,-1.889 13.842,-0.225 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- </g>
- </g>
- </g>
- <g
- id="g2864">
- <g
- id="g2866"
- clip-path="url(#clipPath2868)">
- <g
- id="g2872"
- transform="translate(140.1528,715.9277)">
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2874"
- d="m 0,0 c -7.899,0.482 -21.514,-3.639 -32.867,-7.75 -1.725,-4.071 -2.683,-8.526 -2.683,-13.201 0,-19.178 17.388,-34.725 35.782,-34.725 18.273,0 34.44,15.572 35.782,34.725 0.436,6.237 -1.711,12.114 -4.692,17.181 C 19.552,-1.697 7.061,-0.431 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- <g
- id="g2876">
- <g
- id="g2878"
- clip-path="url(#clipPath2880)">
- <g
- id="g2884"
- transform="translate(119.8818,697.4946)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2886"
- d="M 0,0 C 0.969,2.146 2.437,3.197 3.859,4.996 3.701,5.422 3.355,6.815 3.355,7.298 c 0,2.156 1.749,3.906 3.906,3.906 0.509,0 0.995,-0.101 1.44,-0.278 6.465,4.927 14.976,7.075 23.529,5.163 0.781,-0.176 1.547,-0.389 2.299,-0.623 C 26.076,16.638 16.548,13.644 10.067,8.413 10.265,7.946 10.814,6.611 10.814,6.074 10.814,3.917 9.418,3.392 7.261,3.392 6.771,3.392 6.303,3.486 5.87,3.651 4.406,1.685 2.612,-2.06 1.734,-4.401 c 3.584,-3.206 6.822,-4.368 11.042,-5.945 -0.011,0.201 0.145,0.387 0.145,0.592 0,6.503 5.725,11.788 12.229,11.788 5.828,0 10.654,-4.238 11.596,-9.798 2.908,1.85 5.72,3.268 7.863,6.01 -0.5,0.61 -1.039,2.337 -1.039,3.187 0,1.957 1.588,3.544 3.545,3.544 0.277,0 0.543,-0.04 0.802,-0.1 1.088,2.236 1.909,4.606 2.434,7.05 -10.17,7.529 -29.847,6.502 -29.847,6.502 0,0 -15.658,0.817 -26.258,-4.349 C -5.047,8.969 -3.008,4.11 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2888"
- transform="translate(168.4907,700.4282)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2890"
- d="m 0,0 c 0.719,-0.648 1.111,-1.217 1.42,-1.771 0.951,-1.71 -0.957,-3.275 -2.914,-3.275 -0.199,0 -0.391,0.027 -0.582,0.059 -2.205,-3.446 -6.067,-7.865 -9.498,-10.089 5.261,-0.862 10.222,-2.969 14.17,-6.225 2.875,5.151 5.08,12.589 5.08,18.907 0,4.809 -2.123,8.334 -5.328,10.92 C 2.18,5.95 0.805,2.347 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2892"
- transform="translate(125.7842,667.8032)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2894"
- d="M 0,0 C 1.753,4.841 6.065,8.592 10.144,11.892 9.547,12.709 8.652,14.732 8.279,15.69 3.304,17.203 -1.098,20.035 -4.512,23.784 -4.537,23.675 -4.568,23.569 -4.594,23.46 -5.237,20.579 -5.355,17.692 -5.035,14.876 -2.653,14.432 -0.85,12.345 -0.85,9.834 -0.85,8.345 -2.155,6.187 -3.168,5.248 -2.067,2.872 -1.316,1.726 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2896"
- transform="translate(125.4756,663.7393)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2898"
- d="m 0,0 c -2.091,2.079 -3.537,6.226 -4.894,8.83 -0.254,-0.039 -0.514,-0.066 -0.78,-0.066 -2.836,0 -5.807,2.38 -5.135,5.134 0.372,1.524 1.424,2.521 3.137,3.353 -0.39,3.157 -0.496,7.695 0.237,10.977 0.21,0.939 0.655,1.379 0.95,2.273 -3.129,4.579 -5.151,10.589 -5.151,16.552 0,0.218 0.011,0.433 0.016,0.649 -5.288,-2.652 -9.253,-6.83 -9.253,-13.407 0,-14.548 8.379,-28.819 20.846,-34.413 C -0.018,-0.079 -0.01,-0.039 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2900"
- transform="translate(156.1313,683.8511)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2902"
- d="m 0,0 c -1.611,-4.582 -5.967,-7.873 -11.1,-7.873 -2.746,0 -5.265,0.947 -7.267,2.521 -4.127,-3.214 -7.871,-8.86 -9.774,-13.758 0.854,-0.919 1.449,-1.675 2.407,-2.49 2.887,-0.752 6.863,0 9.988,0 12.57,0 23.703,5.592 30.086,15.398 C 10.096,-3.263 5.09,-0.466 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- <g
- id="g2904">
- <g
- id="g2906"
- clip-path="url(#clipPath2908)">
- <g
- id="g2912"
- transform="translate(119.5596,695.7944)">
- <path
- style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2914"
- d="m 0,0 c 0.969,2.146 2.184,4.132 3.605,5.931 -0.158,0.425 -0.25,0.884 -0.25,1.367 0,2.156 1.749,3.906 3.906,3.906 0.509,0 0.995,-0.101 1.44,-0.278 6.465,4.927 14.976,7.075 23.529,5.163 0.781,-0.176 1.547,-0.389 2.299,-0.623 -8.453,1.172 -17.187,-1.419 -23.668,-6.651 0.198,-0.466 0.306,-0.98 0.306,-1.517 0,-2.157 -1.749,-3.906 -3.906,-3.906 -0.49,0 -0.958,0.094 -1.391,0.259 -1.464,-1.966 -2.661,-4.138 -3.539,-6.48 3.078,-3.317 6.856,-5.94 11.075,-7.517 -0.01,0.201 -0.031,0.4 -0.031,0.605 0,6.503 5.271,11.775 11.775,11.775 5.828,0 10.654,-4.238 11.596,-9.798 2.908,1.85 5.492,4.226 7.634,6.968 -0.5,0.61 -0.81,1.379 -0.81,2.229 0,1.957 1.588,3.544 3.545,3.544 0.277,0 0.543,-0.04 0.802,-0.1 1.088,2.236 1.909,4.606 2.434,7.05 -10.17,7.529 -29.847,6.502 -29.847,6.502 0,0 -15.658,0.817 -26.258,-4.349 C -5.047,8.969 -3.008,4.11 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2916"
- transform="translate(169.0396,699.8481)">
- <path
- style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2918"
- d="m 0,0 c 0.719,-0.648 1.18,-1.577 1.18,-2.621 0,-1.957 -1.588,-3.545 -3.545,-3.545 -0.199,0 -0.391,0.027 -0.582,0.059 -2.205,-3.446 -5.127,-6.384 -8.559,-8.608 5.072,-0.793 9.846,-2.945 13.793,-6.201 2.875,5.151 4.518,11.084 4.518,17.402 0,4.809 -2.123,8.334 -5.328,10.92 C 1.309,4.83 0.805,2.347 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2920"
- transform="translate(126.3252,666.6401)">
- <path
- style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2922"
- d="M 0,0 C 1.753,4.841 4.799,9.185 8.878,12.484 8.281,13.302 7.789,14.195 7.416,15.153 2.44,16.666 -1.961,19.498 -5.375,23.247 -5.4,23.138 -5.432,23.032 -5.457,22.923 -6.101,20.042 -6.219,17.155 -5.898,14.339 -3.517,13.895 -1.713,11.808 -1.713,9.297 -1.713,7.808 -2.352,6.469 -3.365,5.53 -2.446,3.582 -1.316,1.726 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2924"
- transform="translate(125.4619,663.7983)">
- <path
- style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2926"
- d="m 0,0 c -2.091,2.079 -3.846,4.467 -5.202,7.07 -0.255,-0.039 -0.515,-0.065 -0.78,-0.065 -2.836,0 -5.135,2.299 -5.135,5.134 0,2.032 1.184,3.784 2.897,4.616 -0.389,3.156 -0.257,6.432 0.477,9.714 0.21,0.938 0.466,1.854 0.761,2.749 -3.129,4.578 -4.962,10.113 -4.962,16.076 0,0.218 0.01,0.433 0.015,0.648 -5.288,-2.651 -9.253,-6.83 -9.253,-13.406 0,-14.549 8.688,-27.06 21.155,-32.654 C -0.018,-0.079 -0.01,-0.039 0,0"
- inkscape:connector-curvature="0" />
- </g>
- <g
- id="g2928"
- transform="translate(155.8091,682.1509)">
- <path
- style="fill:#1287b1;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2930"
- d="m 0,0 c -1.611,-4.582 -5.967,-7.873 -11.1,-7.873 -2.746,0 -5.265,0.947 -7.267,2.521 -4.127,-3.214 -7.242,-7.595 -9.144,-12.494 0.853,-0.919 1.765,-1.785 2.723,-2.599 2.888,-0.752 5.917,-1.155 9.042,-1.155 12.57,0 23.621,6.49 30.004,16.295 C 10.014,-2.365 5.09,-0.466 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- <g
- id="g2932">
- <g
- id="g2934"
- clip-path="url(#clipPath2936)">
- <g
- id="g2940">
- <g
- id="g2942" />
- <g
- id="g2948">
- <g
- id="g2950"
- clip-path="url(#clipPath2944)">
- <g
- id="g2952"
- transform="translate(156.2222,685.187)">
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2954"
- d="M 0,0 10.879,2.595 -0.041,3.152 8.846,9.944 -1.238,6.329 5.615,15.826 -3.85,9.535 l 3.309,11.117 -6.5,-9.163 -0.148,11.579 -4.277,-10.314 -3.566,10.437 0.193,-12.295 -6.163,11.021 3.335,-11.702 -9.997,7.27 7.831,-9.84 -12.411,4.564 9.795,-7.247 -12.56,-0.386 12.842,-3.314 -12.853,-2.779 12.687,-0.92 -10.699,-6.851 11.017,3.994 -7.644,-9.681 9.659,7.79 -3.478,-12.991 7.457,10.572 -1.045,-12.486 4.233,11.319 3.603,-11.897 0.876,11.933 5.348,-10.181 -3.16,11.645 9.793,-7.586 -6.322,9.672 10.744,-4.186 -8.215,8.073 L 10.85,-4.164 0,0 Z"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- </g>
- </g>
- </g>
- <g
- id="g2956">
- <g
- id="g2958"
- clip-path="url(#clipPath2960)">
- <g
- id="g2964">
- <g
- id="g2966" />
- <g
- id="g2972">
- <g
- style="opacity:0.35000604"
- id="g2974"
- clip-path="url(#clipPath2968)">
- <g
- id="g2976"
- transform="translate(40.4033,664.3701)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2978"
- d="m 0,0 c 33.74,33.739 60.687,44.155 85.143,48.91 3.236,0.629 3.848,7.7 3.848,7.7 0,0 0.453,-5.208 2.718,-5.887 2.264,-0.68 5.207,8.152 5.207,8.152 0,0 -2.717,-7.926 0,-8.379 2.718,-0.453 7.699,7.699 7.699,7.699 0,0 -2.037,-7.019 -0.678,-7.472 1.357,-0.453 8.15,10.189 8.15,10.189 0,0 -4.076,-7.019 -0.226,-7.699 3.851,-0.679 9.467,4.791 9.467,4.791 0,0 -4.416,-5.005 -2.448,-5.696 8.379,-2.945 15.159,7.945 15.159,7.945 0,0 -1.571,-4.775 -5.647,-9.983 8.83,-2.264 15.389,11.039 15.389,11.039 l -6.559,-13.303 c 3.397,-1.813 16.985,13.812 16.985,13.812 0,0 -7.02,-12.228 -11.096,-14.718 2.264,-1.812 10.416,5.434 10.416,5.434 0,0 -6.567,-8.151 -4.076,-8.604 3.623,-2.944 16.982,15.171 16.982,15.171 0,0 -5.207,-10.642 -12.906,-19.021 6.435,-3.219 22.418,17.436 22.418,17.436 0,0 -0.453,-6.567 -12.002,-16.983 8.605,1.132 19.701,17.436 19.701,17.436 0,0 -4.076,-12.228 -13.814,-20.832 8.449,0.879 21.964,21.738 21.964,21.738 0,0 -5.207,-14.492 -15.849,-22.871 11.775,-2.604 28.758,14.945 28.758,14.945 0,0 -6.68,-12.455 -15.399,-17.549 9.738,-3.736 23.098,11.662 23.098,11.662 0,0 -13.36,-20.607 -34.645,-19.701 -6.984,0.297 -28.109,21.188 -73.368,19.474 C 44.609,42.57 31.929,17.209 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- </g>
- <g
- id="g2980"
- transform="translate(41.7861,666.9326)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path2982"
- d="m 0,0 c 33.74,33.739 60.686,44.154 85.142,48.91 3.237,0.629 3.849,7.699 3.849,7.699 0,0 0.452,-5.209 2.718,-5.887 2.264,-0.679 5.207,8.151 5.207,8.151 0,0 -2.717,-7.926 0,-8.378 2.718,-0.452 7.699,7.699 7.699,7.699 0,0 -2.037,-7.019 -0.68,-7.472 1.359,-0.453 8.152,10.19 8.152,10.19 0,0 -4.076,-7.02 -0.226,-7.699 3.849,-0.68 9.467,4.79 9.467,4.79 0,0 -4.416,-5.005 -2.448,-5.696 8.379,-2.944 15.157,7.945 15.157,7.945 0,0 -1.571,-4.775 -5.645,-9.983 8.83,-2.265 15.389,11.04 15.389,11.04 l -6.559,-13.305 c 3.397,-1.811 16.983,13.812 16.983,13.812 0,0 -7.018,-12.226 -11.094,-14.717 2.264,-1.812 10.416,5.434 10.416,5.434 0,0 -6.567,-8.152 -4.076,-8.604 3.623,-2.945 16.982,15.171 16.982,15.171 0,0 -5.209,-10.643 -12.906,-19.021 6.435,-3.22 22.418,17.436 22.418,17.436 0,0 -0.453,-6.568 -12.002,-16.984 8.605,1.133 19.701,17.437 19.701,17.437 0,0 -4.076,-12.228 -13.814,-20.833 8.449,0.879 21.964,21.738 21.964,21.738 0,0 -5.207,-14.492 -15.849,-22.87 11.775,-2.604 28.758,14.944 28.758,14.944 0,0 -6.68,-12.453 -15.399,-17.548 9.738,-3.736 23.098,11.662 23.098,11.662 0,0 -13.36,-20.607 -34.647,-19.701 -6.982,0.298 -28.107,21.189 -73.367,19.474 C 44.609,42.57 31.928,17.209 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- <g
- id="g2984">
- <g
- id="g2986"
- clip-path="url(#clipPath2988)">
- <g
- id="g2992">
- <g
- id="g2994" />
- <g
- id="g3000">
- <g
- style="opacity:0.35000604"
- id="g3002"
- clip-path="url(#clipPath2996)">
- <g
- id="g3004"
- transform="translate(39.5195,660.6802)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path3006"
- d="m 0,0 c 17.832,-8.945 34.137,1.358 54.686,-4.433 15.623,-4.404 34.645,-9.833 60.458,-6.096 25.814,3.735 47.893,14.944 58.424,34.985 3.283,8.943 16.642,-2.039 16.642,-2.039 0,0 -9.736,4.076 -9.509,2.151 0.226,-1.924 14.605,-8.604 14.605,-8.604 0,0 -13.021,4.076 -12.228,1.019 0.793,-3.057 16.302,-15.285 16.302,-15.285 0,0 -17.548,13.36 -19.019,11.549 -1.473,-1.812 7.472,-9.172 7.472,-9.172 0,0 -14.832,9.172 -20.041,6.467 -3.746,-1.943 15.399,-14.506 15.399,-14.506 0,0 -12.455,9.512 -15.399,7.021 -2.943,-2.492 14.04,-22.871 14.04,-22.871 0,0 -19.249,20.833 -21.172,19.814 -1.926,-1.019 5.32,-10.983 5.32,-10.983 0,0 -9.51,10.417 -12.113,8.605 -2.604,-1.812 13.586,-28.871 13.586,-28.871 0,0 -17.549,27.738 -24.795,23.098 11.379,-24.966 7.133,-28.533 7.133,-28.533 0,0 -1.452,25.47 -15.625,24.796 -7.133,-0.34 3.396,-19.021 3.396,-19.021 0,0 -9.691,17.062 -16.145,16.722 11.895,-22.511 7.655,-31.667 7.655,-31.667 0,0 1.967,19.226 -14.166,29.925 6.113,-5.433 -3.836,-29.925 -3.836,-29.925 0,0 8.752,36.091 -6.455,29.21 -2.403,-1.085 -0.17,-18.002 -0.17,-18.002 0,0 -3.057,19.362 -7.641,18.342 -2.673,-0.593 -16.984,-26.833 -16.984,-26.833 0,0 11.719,28.362 8.153,27.173 -2.598,-0.867 -7.473,-12.568 -7.473,-12.568 0,0 2.377,11.549 0,12.228 -2.377,0.68 -15.625,-12.228 -15.625,-12.228 0,0 9.851,11.549 8.152,13.927 -2.574,3.603 -5.591,3.772 -9.171,2.377 -5.209,-2.03 -12.227,-11.548 -12.227,-11.548 0,0 6.996,9.637 5.773,13.247 -1.963,5.8 -22.077,-11.209 -22.077,-11.209 0,0 11.888,11.209 9.171,13.587 -2.717,2.377 -17.471,1.642 -22.078,1.655 C 8.832,-6.454 4.124,-3.267 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- </g>
- <g
- id="g3008"
- transform="translate(38.8408,662.7183)">
- <path
- style="fill:#373535;fill-opacity:1;fill-rule:nonzero;stroke:none"
- id="path3010"
- d="m 0,0 c 17.832,-8.945 34.136,1.358 54.685,-4.434 15.623,-4.402 34.646,-9.832 60.46,-6.095 25.814,3.736 47.891,14.945 58.422,34.984 3.283,8.944 16.642,-2.037 16.642,-2.037 0,0 -9.736,4.075 -9.509,2.15 0.226,-1.924 14.605,-8.604 14.605,-8.604 0,0 -13.021,4.075 -12.228,1.018 0.793,-3.056 16.304,-15.284 16.304,-15.284 0,0 -17.55,13.361 -19.021,11.548 -1.471,-1.811 7.473,-9.17 7.473,-9.17 0,0 -14.833,9.17 -20.041,6.467 -3.747,-1.944 15.398,-14.506 15.398,-14.506 0,0 -12.455,9.511 -15.398,7.02 -2.944,-2.492 14.041,-22.871 14.041,-22.871 0,0 -19.25,20.833 -21.174,19.814 -1.924,-1.02 5.322,-10.982 5.322,-10.982 0,0 -9.512,10.416 -12.115,8.604 -2.604,-1.811 13.586,-28.871 13.586,-28.871 0,0 -17.549,27.739 -24.795,23.097 11.379,-24.965 7.133,-28.532 7.133,-28.532 0,0 -1.452,25.47 -15.625,24.795 -7.133,-0.34 3.396,-19.02 3.396,-19.02 0,0 -9.691,17.063 -16.144,16.723 11.896,-22.512 7.654,-31.668 7.654,-31.668 0,0 1.967,19.227 -14.166,29.926 6.113,-5.434 -3.836,-29.926 -3.836,-29.926 0,0 8.754,36.091 -6.453,29.21 -2.403,-1.086 -0.17,-18.002 -0.17,-18.002 0,0 -3.059,19.361 -7.642,18.342 -2.674,-0.593 -16.985,-26.833 -16.985,-26.833 0,0 11.719,28.362 8.153,27.172 -2.598,-0.865 -7.473,-12.566 -7.473,-12.566 0,0 2.378,11.548 0,12.227 -2.377,0.679 -15.624,-12.227 -15.624,-12.227 0,0 9.851,11.548 8.151,13.926 -2.574,3.603 -5.591,3.771 -9.17,2.376 -5.21,-2.029 -12.228,-11.547 -12.228,-11.547 0,0 6.996,9.638 5.774,13.247 -1.964,5.799 -22.077,-11.209 -22.077,-11.209 0,0 11.888,11.209 9.17,13.586 C 41.778,-5.774 27.024,-6.51 22.417,-6.496 8.831,-6.453 4.124,-3.267 0,0"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- </g>
- </g>
- </g>
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline" />
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 C 258.62742,540.36218 264,545.73477 264,552.36218 Z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 C 258.62742,540.36218 264,545.73477 264,552.36218 Z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 C 258.62742,540.36218 264,545.73477 264,552.36218 Z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 C 669.9821,591.68426 670.20862,595.55064 669.8173,595.77657 Z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/cassandra/lib/apache.key b/charms/trusty/cassandra/lib/apache.key
deleted file mode 100644
index 6dfb1fd..0000000
--- a/charms/trusty/cassandra/lib/apache.key
+++ /dev/null
@@ -1,53 +0,0 @@
-Apache Cassandra signing key (2014-11-14)
-
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mQINBFQJvgUBEAC0KcYCTj0hd15p4fiXBsbob0sKgsvN5Lm7N9jzJWlGshJ0peMi
-kH8YhDXw5Lh+mPEHksL7t1L8CIr1a+ntns/Opt65ZPO38ENVkOqEVAn9Z5sIoZsb
-7rX3USHJEnp1vAtG6YFTLpHOwgoIhY//D8qHb89MTYeqLHkep9h16vrybfglLLlJ
-qdph+lRC/hpJIdAUCmybym2arnLqBfYJTfU3LyRYQoRPIzrE38Y/oNvYN6mkwCPS
-fwNoPB7hrT0u6otNckxftR01To614+Jnl81RyTePGC/wahrHll7mbMEFw9nlMECt
-GgQYFRPmcojLvMxXnBWBQ4LjTSMvToFCPdnjzMeWkeN6qE1c2/S/qmxOXBSP5DCz
-UKuqAAZZoXTbMcEWYngUioFYUU1TSrK85Q8xqxAOUhYDSfv2brZp7h2lNY1RsQ9L
-6inMwNBTyLnH1b9WXH6XUNH51yUThJzRP2CUYw3P0lATrS62aTpehsnddQ8yWskq
-+7nx6dN+khXqvAm+WyO2NojfeRFgHRIDlESTPkEekWzmZgI3R7ZzEBd6Nqx0k8mh
-ePW7ImChqyazjsZevBxJDOOjzFrvHEVMhQld2aQ3g4SYUZSCBbVZw7GqoqDaN3dH
-Vs1pFCpIOqIGg4qRID27AKvO0YdHDX8tIQIqCkvTrIJD8fJxyBDsXvqn0QARAQAB
-tCBUIEpha2UgTHVjaWFuaSA8amFrZUBhcGFjaGUub3JnPokCOAQTAQIAIgUCVAm+
-BQIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQdJ1u7ANTsSyMIA//Zqhl
-uNA7PSEz6+jhi2H3i8eOg2kNeu4Bz8jLOpu8LhrNzqZp5fbRJ9jchbkVWFeUW9wp
-tQfqPHIB7REMmwojS9Sf0jYmxR45qI26MTFQDb2vucMSVbrxYpK/afZywvYkIVHm
-RBPERyXezJoBfj23Gezft2OPRozs4OrsKfeyFZy+RfHDQkEX51eZ1co5kr1uLd1F
-UbtH42f8cCn9n5RaDAuDWmbHWfDQAHngxb+TAlLvBWEknRstc8lMIf/g6fv9EQBt
-hIaK4NwQhB2QYg4GMUzKR/eHX5nrVu6YtNyXN/rp8P3BoEwc4MRlOGnvJWrrsaqR
-uoxZX1PDHRTmypzygcnkdqciguy4Zz8fYkKjHZQt+CiuNqxkOwCg1dfCwTxCP1uM
-fd9Aw6zEb7cgBwcGqixp0M9zYJFAbPpFJ+/oZHiVQiEdRlB0BXhdWsBWKlBafoXC
-4/esVB7cNeG7kipF4zlFzrQv7xrQhMTV66RmiheiZs6FPNd8sQunrBsrcPqlMnb2
-yf0sX4yXiDxln4zvja0V1SaVKRfL91mLZOPzr3ccGfpog/3OxuzMiOlqVwCJz91g
-aNSg6OY2Wioop0t0LZ3K0TjamYLn4gEx+t7t/9UZZVutd/uWXS+zr5Kz32R2F0mJ
-eE7Gg2S8rMfplsYObSIK3jBXUIMgskNSaNmdX725Ag0EVAm+BQEQAK50pLVwBriP
-dC5chmDvBinbmB3rWN8lTrFb5IOjQxkrhA89d90i9Jjq49RG+7LsHX4E24SvBKZd
-2J5y04LjOeOweTieVSXNz/XqLTQcbat4btP0FCiE5rSCmfT9tGs185ilBEB3UOhQ
-P/0rkURNULHFbiP4WT36bkaSZ3lyP0lH0qckRXW3tez207+Ckpl58TCfKcyMLlky
-D3jO4+n3FfTNYSTLX40dW1MjfYJRQabNcsO9s5qxV1ir8ZqLBfvIG+FoPnVDP+7L
-lk4yEdugiKpOlGKsF9MSy6g0gKd3FO0jr/w0DCacvF3QH0rXqo0KYyb7GgQgtJku
-9ckfJxaW8Aobcj5xhcjgDwR0TeG0xTLlnKsBSebunujqRSWJSXCeEy9yPC2yvznd
-pal/kKt2vQaIAYpvYgvr7kKqvsAH5w5wf+p5wCrb+Pqhv3BTQonDNe/qo4DgsS0/
-VdQKiFvUxQiQUblDQVkpwX6UpPUBzIMsjJHWg4mGZSH4MjA0Qi7SqUCYJJCfvaEa
-0ad/rj3nYivzlXD4oWC9ZzYhIOVus8hvuAAjuK4JOM8MLvhph/dqwNr5kzdKVM4i
-3sStrx2KIsPO2ak702DJUYPjgsz2pe/D46BCpKiuJEAvzOKc0X+PQICwSHnKHkXV
-zbbaonvKdJVepMtE3oa5wqsH6tmP9b+fABEBAAGJAh8EGAECAAkFAlQJvgUCGwwA
-CgkQdJ1u7ANTsSyHNhAAmKsNTiBjadx6JQ94Dsw30EGHCUwpXNK3/+nXzFnKWDhm
-zd9SlFRFhqYiZHTLOK2cFUykcnPWiUepMEPz5JUK3qHvEu9LneZsejbF2yWFKZQ3
-Vu7bAFc8V92u9vwj1q3ZGHe+lEvDhUg4a97tfhFZrCyu0RZ0by+oPabfNbq3QkA1
-1lQDCuvBS8L0Y/oZM6Af+x2+vkERRgsPFmKUbkARnZxzIvlq8hwN0rp+AlyC+7/L
-LwA67Y+ekVimnjPyahCowPIROXNZgl65c4e06zpPgUSnbp5nI5SKp2NlyxNEpYBk
-cElEAqO+qH1oYaDO3QsHtSIq+qi+gWxNBuMNAJphys/82u7arHOgonsCqNvbS6Pt
-iaz3TUrQi+SFa1oQ67Gb1DQZ3EOraEdD/ooFWxEFS2aXo0bKs7nx0VgpxAFsuTN+
-niHnpmaOxdyUP8nQnc+GRMPxTB1/5906ww/PR6aLgW6+Jhc5pNGUI/gBSK7nLReq
-wEWi68zsTb9rh9I+ILdnbM/Loq1vCFLlGgc26U7aRj+N/os5ys5TPkOpuyMoz7Rq
-PkjrM2CQoOMxeLWjXJSwjWeLGPalw2/to9NFClznS6hYUETn2HB/Px0DOMiUzm3B
-AUeLlJzSeRLTKhcOugK7UcsQD2FHnMBJz50bxis9X7pjmnc/tWpjAGJfaWdjDIo=
-=yiQ4
------END PGP PUBLIC KEY BLOCK-----
diff --git a/charms/trusty/cassandra/lib/datastax.key b/charms/trusty/cassandra/lib/datastax.key
deleted file mode 100644
index adb7cd8..0000000
--- a/charms/trusty/cassandra/lib/datastax.key
+++ /dev/null
@@ -1,49 +0,0 @@
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mQENBExkbXsBCACgUAbMWASAz/fmnMoWE4yJ/YHeuFHTK8zloJ/mApwizlQXTIVp
-U4UV8nbLJrbkFY92VTcC2/IBtvnHpZl8eVm/JSI7nojXc5Kmm4Ek/cY7uW2KKPr4
-cuka/5cNsOg2vsgTIMOZT6vWAbag2BGHtEJbriMLhT3v1tlu9caJfybu3QFWpahC
-wRYtG3B4tkypt21ssWwNnmp2bjFRGpLssc5HCCxUCBFLYoIkAGAFRZ6ymglsLDBn
-SCEzCkn9zQfmyqs0lZk4odBx6rzE350xgEnzFktT2uekFYqRqPQY8f7AhVfj2DJF
-gVM4wXbSoVrTnDiFsaJt/Ea4OJ263jRUHeIRABEBAAG0LVJpcHRhbm8gUGFja2Fn
-ZSBSZXBvc2l0b3J5IDxwYXVsQHJpcHRhbm8uY29tPokBPgQTAQIAKAIbAwYLCQgH
-AwIGFQgCCQoLBBYCAwECHgECF4AFAlW/zKMFCRLBYKQACgkQNQIA8rmZo3LebAgA
-gAwWkvBrPaD5Kf8H4uw9rXtHnHYxX5G6cOVJ3vuWCs1ov7m3JWq918q00hWfLtOs
-zb15kFcjcEJ7kiRFJmAXZhcX2I0DHTmTZSl9orKzoUlXQqAANJGdek8pzdTDUQfz
-V26k63d6eLqjXotrb0hFzg7B8VSolxRE44S5k1xhzUCedOqYYsWVv3xnRIP6UBPt
-WLvzrLa0o9x/hT4w81dOP4rzZMuq2RApnenoz9AZwJrmZ14QW2ncy4RbqK6pKdRJ
-y57vBv8F0LkGlLwBd/JYWwQ85lUTkNG5wCWdj0IEYTO3+fGyO1LHU6bVZCrNtkUE
-ahSZUiRdidiktIkbtNXImYkCHAQQAQgABgUCTGRt2QAKCRATbpzxe100LaUfD/9D
-q84HarIQMEoUiRBklg+afgTMaNNdvhU3V59KoMja2vMeE4JjE3SvNoKCHjPZj6Ti
-720KL6V5O/Uo1VjtSXzAPRJywcE9aS5HRjM2Dr1mp5GnmpvbiKBdl91G9aPc3D2Z
-LpG7vZr8E/vYLc5h1DMz2XDqi6gAqW2yxb2vnmHL4FiAdoXfpZimC9KZpUdTsGPO
-VbXEDEn3y/AiIC35Bq66Sp3W4gVNakV7Y5RUPPDDBIsTZEOhzd9nl5FXOnPtONp5
-dtp5NoWl6q3BjYe2P52TloCp+BJ62donfFTRSGfqyvtaRgmnHHEIWgypMghW6wSb
-O/BxFpdggHTItMfBg2a8tWDFjYmBoFd3iP9SfcmBb/7zB5YXC5b1/s3RNCtR76hf
-+iXjm/zy22tb6qy5XJsnCoORjEoFaWNH6ckgACK7HQyJZ2Lo2MuCYYaQLs6gTd6a
-zMEQHT08cPF+I5It9mOzAtUOkCcVK8dIXRFETXFVdQqFMTmZmuK1Iv1CFBeUIHnM
-iyoYv1bzNsUg/hJpW8ximVmBg5Apza2K0p3XKHkw9MPBqnQ4PbBM1nqb/+o56p+o
-8mVZmjn4bdraB8c0Br15Mi19Zne7b65OZ5k+SVripUk5/XeJD9M9U6+DG+/uxemD
-Fzp9XjnnAe8T/u8JpqHYQ2mRONFM7ZMOAFeEe4yIEIkBPgQTAQIAKAUCTGRtewIb
-AwUJA8JnAAYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQNQIA8rmZo3K3HAf/
-V+6OSdt/Zwdsk+WsUwi75ndOIz60TN8Wg16WOMq5KOBuYIneG2+CEFJHTppNLc2j
-r/ugTjTPeS/DAo5MtnK+zzHxT7JmMKypb23t6MaahSlER4THbYvWUwsw5mm2LsTe
-PTlb5mkvQnXkt6pN2UzZVyIdNFXRv1YZLdTcf4aJ0pZySvCdYoE9RaoP4/JI9GfS
-NXH7oOxI8YaxRGK5i6w/LZyhxkfbkPX+pbbe1Ept+SZCcwWVc/S6veGZWQ1pNHR2
-RW6F3WE0Mle6xWtvW1NlMs4ATEqS13GS4RVlgE07KTe/oBRkd+4NwXAQoEzUvoRr
-j5Ad7LVKeygeUUyaWP+qN7kBDQRMZG17AQgAypZBEfm9pM8Tr4ktsHp1xThYHvzT
-OScLPZcCaF1Gjg8em0cQI4z4yN+yffsmUD4/dGcRxZgVms/jTexKQ8Z/Ps3e4vRG
-b4RCFaY0KhW4t+TTJJ9I5wvFzXZj7zNFxiQWpueiq/cDiBY+Liv3zMSOBaXzxR6L
-7igNPKi/0ELLyCIU/okUwqc0O/4r5PgFANkMyvvVNqzxjC5s8MXbGivJXiML67/Y
-0M/siNqDSia/TGItpXjvi7v1zulbiIV0iSBkO3vsxNE0xXGBXY/UztAShN3FTbx9
-CZDupi35wgqK7McJ3WSjEDzwkElmwkmh7JdLziyH09kS1wRqiLcB+wSTywARAQAB
-iQElBBgBAgAPAhsMBQJVv8zOBQkSwWDOAAoJEDUCAPK5maNyLl4H/3n/+xZsuKia
-fHtBUMh44YRabEX1Bd10LAfxGlOZtKV/Dr1RaKetci6RRa5sJj0wKra6FhIryuqS
-jFTalPF3o8WjVEA5AjJ3ddSgAwX5gGJ3u+C0XMI0E6h/vAXh6meFxHtGinYr1Gcp
-P1/S3/Jy+0cmTt3FvqBtXtU3VIyb/4vUNZ+dY+jcw/gs/yS+s+jtR8hWUDbSrbU9
-pja+p1icNwU5pMbEfx1HYB7JCKuE0iJNbAFagRtPCOKq4vUTPDUQUB5MjWV+89+f
-cizh+doQR9z8e+/02drCCMWiUf4iiFs2dNHwaIPDOJ8Xn9xcxiUaKk32sjT3sict
-XO5tB2KhE3A=
-=YO7C
------END PGP PUBLIC KEY BLOCK-----
diff --git a/charms/trusty/cassandra/lib/juju-deployer-wrapper.py b/charms/trusty/cassandra/lib/juju-deployer-wrapper.py
deleted file mode 100755
index bf792f2..0000000
--- a/charms/trusty/cassandra/lib/juju-deployer-wrapper.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/python
-
-import subprocess
-import sys
-
-# Strip the -W option, as its noise messes with test output.
-args = list(sys.argv[1:])
-if '-W' in args:
- args.remove('-W')
-cmd = ['juju-deployer'] + args
-try:
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-except subprocess.CalledProcessError as x:
- sys.stderr.write(x.output)
- sys.exit(x.returncode)
diff --git a/charms/trusty/cassandra/lib/testcharms/empty/hooks/install b/charms/trusty/cassandra/lib/testcharms/empty/hooks/install
deleted file mode 100755
index ce28f62..0000000
--- a/charms/trusty/cassandra/lib/testcharms/empty/hooks/install
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -ex
-# apt-get install \
-# python3-pip python3-dev python3-six python3-yaml \
-# build-essential libev4 libev-dev
-# pip3 install blist
-# pip3 install cassandra-driver
diff --git a/charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml b/charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml
deleted file mode 100644
index bc3e0f0..0000000
--- a/charms/trusty/cassandra/lib/testcharms/empty/metadata.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-name: empty
-maintainer: Somchai Turdsak <somchai@example.com>
-summary: A charm that does nothing except define some relations.
-description: |
- This is a client charm for testing the Cassandra client relations.
-tags: ["databases"]
-requires:
- database:
- interface: cassandra
- database-admin:
- interface: cassandra-admin
diff --git a/charms/trusty/cassandra/metadata.yaml b/charms/trusty/cassandra/metadata.yaml
deleted file mode 100644
index 7ec537a..0000000
--- a/charms/trusty/cassandra/metadata.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-name: cassandra
-maintainer: Stuart Bishop <stuart.bishop@canonical.com>
-summary: distributed storage system for structured data
-description: |
- Cassandra is a distributed (peer-to-peer) system for the management and
- storage of structured data.
-tags: ["databases"]
-provides:
- database:
- interface: cassandra
- database-admin:
- interface: cassandra-admin
- nrpe-external-master:
- interface: nrpe-external-master
- scope: container
- data:
- interface: block-storage
- scope: container
- optional: true
-peers:
- cluster:
- interface: cassandra-cluster
diff --git a/charms/trusty/cassandra/scripts/volume-common.sh b/charms/trusty/cassandra/scripts/volume-common.sh
deleted file mode 100755
index 3af8ff1..0000000
--- a/charms/trusty/cassandra/scripts/volume-common.sh
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/bin/bash
-# Author: JuanJo Ciarlante <jjo@canonical.com>
-# Copyright: Canonical Ltd. 2012
-# License: GPLv2
-#
-# juju storage common shell library
-#
-
-#------------------------------
-# Returns a mount point from passed vol-id, e.g. /srv/juju/vol-000012345
-#
-# @param $1 volume id
-# @echoes mntpoint-path eg /srv/juju/vol-000012345
-#------------------------------
-_mntpoint_from_volid() {
- local volid=${1?missing volid}
- [[ ${volid} != "" ]] && echo /srv/juju/${volid} || echo ""
-}
-
-
-#------------------------------
-# Assert that passed mount points hold different filesystems
-#
-# @param $1 mntpoint1
-# @param $2 mntpoint2
-# @return 0 different FS
-# 1 same FS
-#------------------------------
-_assert_diff_fs() {
- local mnt1="${1:?missing mntpoint1}"
- local mnt2="${2:?missing mntpoint2}"
- local fsid1 fsid2
- fsid1=$(stat --file-system -c '%i' "${mnt1}" 2>/dev/null)
- fsid2=$(stat --file-system -c '%i' "${mnt2}" 2>/dev/null)
- [[ ${fsid1} != ${fsid2} ]]
- return $?
-}
-
-#------------------------------
-# Initialize volume (sfdisk, mkfs.ext4) IFF NOT already, mount it at
-# /srv/juju/<volume-id>
-#
-# @param $1 volume-id, can be any arbitrary string, better if
-# equal to EC2/OS vol-id name (just for consistency)
-# @return 0 success
-# 1 nil volid/etc
-# 2 error while handling the device (non-block device, sfdisk error, etc)
-#------------------------------
-volume_init_and_mount() {
- ## Find 1st unused device (reverse sort /dev/vdX)
- local volid=${1:?missing volid}
- local dev_regexp
- local dev found_dev=
- local label="${volid}"
- local func=${FUNCNAME[0]}
- dev_regexp=$(config-get volume-dev-regexp) || return 1
- mntpoint=$(_mntpoint_from_volid ${volid})
-
- [[ -z ${mntpoint} ]] && return 1
- if mount | egrep -qw "${mntpoint}";then
- _assert_diff_fs "/" "${mntpoint}" || {
- juju-log "ERROR: returning from ${func} with '${mntpoint}' still at '/' filesystem"
- return 1
- }
- juju-log "NOTICE: mntpoint=${mntpoint} already mounted, skipping volume_init_and_mount"
- return 0
- fi
-
- # Sanitize
- case "${dev_regexp?}" in
- # Careful: this is glob matching against an regexp -
- # quite narrowed
- /dev/*|/dev/disk/by-*)
- ;; ## Ok
- *)
- juju-log "ERROR: invalid 'volume-dev-regexp' specified"
- return 1
- ;;
- esac
-
- # Assume udev will create only existing devices
- for dev in $(ls -rd1 /dev/* | egrep "${dev_regexp}" | egrep -v "[1-9]$" 2>/dev/null);do
- ## Check it's not already mounted
- mount | egrep -q "${dev}[1-9]?" || { found_dev=${dev}; break;}
- done
- [[ -n "${found_dev}" ]] || {
- juju-log "ERROR: ${func}: coult not find an unused device for regexp: ${dev_regexp}"
- return 1
- }
- partition1_dev=${found_dev}1
-
- juju-log "INFO: ${func}: found_dev=${found_dev}"
- [[ -b ${found_dev?} ]] || {
- juju-log "ERROR: ${func}: ${found_dev} is not a blockdevice"
- return 2
- }
-
- # Run next set of "dangerous" commands as 'set -e', in a subshell
- (
- set -e
- # Re-read partition - will fail if already in use
- blockdev --rereadpt ${found_dev}
-
- # IFF not present, create partition with full disk
- if [[ -b ${partition1_dev?} ]];then
- juju-log "INFO: ${func}: ${partition1_dev} already present - skipping sfdisk."
- else
- juju-log "NOTICE: ${func}: ${partition1_dev} not present at ${found_dev}, running: sfdisk ${found_dev} ..."
- # Format partition1_dev as max sized
- echo ",+," | sfdisk ${found_dev}
- fi
-
- # Create an ext4 filesystem if NOT already present
- # use e.g. LABEl=vol-000012345
- if file -s ${partition1_dev} | egrep -q ext4 ; then
- juju-log "INFO: ${func}: ${partition1_dev} already formatted as ext4 - skipping mkfs.ext4."
- ## Check e2label - log if it has changed (e.g. already used / initialized with a diff label)
- local curr_label=$(e2label "${partition1_dev}")
- if [[ ${curr_label} != ${label} ]]; then
- juju-log "WARNING: ${func}: ${partition1_dev} had label=${curr_label}, overwritting with label=${label}"
- e2label ${partition1_dev} "${label}"
- fi
- else
- juju-log "NOTICE: ${func}: running: mkfs.ext4 -L ${label} ${partition1_dev}"
- mkfs.ext4 -L "${label}" ${partition1_dev}
- fi
-
- # Mount it at e.g. /srv/juju/vol-000012345
- [[ -d "${mntpoint}" ]] || mkdir -p "${mntpoint}"
- mount | fgrep -wq "${partition1_dev}" || {
- local files_below_mntpoint="$(cd ${mntpoint}; ls -1A |wc -l )"
- if [[ ${files_below_mntpoint} -ne 0 ]]; then
- juju-log "ERROR: *not* doing 'mount "${partition1_dev}" "${mntpoint}"' because there are already ${files_below_mntpoint} files/dirs beneath '${mntpoint}'"
- exit 1
- fi
- ## should always fsck before mounting (e.g. fsck after max time (-i) / max mounts (-c) )
- fsck "${partition1_dev}"
- mount "${partition1_dev}" "${mntpoint}"
- juju-log "INFO: ${func}: mounted as: '$(mount | fgrep -w ${partition1_dev})'"
- }
-
- # Add it to fstab is not already there
- fgrep -wq "LABEL=${label}" /etc/fstab || {
- echo "LABEL=${label} ${mntpoint} ext4 defaults,nobootwait,comment=${volid}" | tee -a /etc/fstab
- juju-log "INFO: ${func}: LABEL=${label} added to /etc/fstab"
- }
- )
- # Final assertion: mounted filesystem id is different from '/' (effectively mounted)
- _assert_diff_fs "/" "${mntpoint}" || {
- juju-log "ERROR: returning from ${func} with '${mntpoint}' still at '/' filesystem (couldn't mount new volume)"
- ## try to rmdir mntpoint directory - should not be 'mistakenly' used
- rmdir ${mntpoint}
- return 1
- }
- return $?
-}
-
-#------------------------------
-# Get volume-id from juju config "volume-map" dictionary as
-# volume-map[JUJU_UNIT_NAME]
-# @return 0 if volume-map value found ( does echo volid or ""), else:
-# 1 if not found or None
-#
-#------------------------------
-volume_get_volid_from_volume_map() {
- local volid=$(config-get "volume-map"|python -c$'import sys;import os;from yaml import load;from itertools import chain; volume_map = load(sys.stdin)\nif volume_map: print volume_map.get(os.environ["JUJU_UNIT_NAME"])')
- [[ $volid == None ]] && return 1
- echo "$volid"
-}
-
-# Returns true if permanent storage (considers --ephemeral)
-# @returns 0 if volid set and not --ephemeral, else:
-# 1
-volume_is_permanent() {
- local volid=${1:?missing volid}
- [[ -n ${volid} && ${volid} != --ephemeral ]] && return 0 || return 1
-}
-volume_mount_point_from_volid(){
- local volid=${1:?missing volid}
- if volume_is_permanent "${volid}";then
- echo "/srv/juju/${volid}"
- return 0
- else
- return 1
- fi
-}
-# Do we have a valid storage state?
-# @returns 0 does echo $volid (can be "--ephemeral")
-# 1 config state is invalid - we should not serve
-volume_get_volume_id() {
- local ephemeral_storage
- local volid
- ephemeral_storage=$(config-get volume-ephemeral-storage) || return 1
- volid=$(volume_get_volid_from_volume_map) || return 1
- if [[ $ephemeral_storage == True ]];then
- # Ephemeral -> should not have a valid volid
- if [[ $volid != "" ]];then
- juju-log "ERROR: volume-ephemeral-storage is True, but $JUJU_UNIT_NAME maps to volid=${volid}"
- return 1
- fi
- else
- # Durable (not ephemeral) -> must have a valid volid for this unit
- if [[ $volid == "" ]];then
- juju-log "ERROR: volume-ephemeral-storage is False, but no volid found for: $JUJU_UNIT_NAME"
- return 1
- fi
- fi
- echo "$volid"
- return 0
-}
-
-case "$1" in
- ## allow non SHELL scripts to call helper functions
- call)
- : ${JUJU_UNIT_NAME?} ## Must be called in juju environment
- shift;
- function="${1:?usage: ${0##*/} call function arg1 arg2 ...}"
- shift;
- ${function} "$@" && exit 0 || exit 1
-esac
diff --git a/charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl b/charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl
deleted file mode 100644
index 3a9b9d4..0000000
--- a/charms/trusty/cassandra/templates/cassandra_maintenance_cron.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-# Cassandra maintenance
-# Staggered weekly repairs
-# m h dom mon dow user command
-{{minute}} {{hour}} * * {{dow}} cassandra run-one-until-success nodetool repair -pr >> /var/log/cassandra/maintenance.log 2>&1
-
-# EOF
diff --git a/charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl b/charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl
deleted file mode 100644
index 53318fa..0000000
--- a/charms/trusty/cassandra/templates/nrpe_cmd_file.tmpl
+++ /dev/null
@@ -1,6 +0,0 @@
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-# ${NAGIOS_CONTEXT} ${SERVICE_DESCRIPTION}
-command[${NRPE_CMD_NAME}]=${NRPE_CMD}
-
diff --git a/charms/trusty/cassandra/templates/nrpe_service_file.tmpl b/charms/trusty/cassandra/templates/nrpe_service_file.tmpl
deleted file mode 100644
index ab0b76f..0000000
--- a/charms/trusty/cassandra/templates/nrpe_service_file.tmpl
+++ /dev/null
@@ -1,10 +0,0 @@
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {
- use active-service
- host_name ${NAGIOS_HOSTNAME}
- service_description ${NAGIOS_HOSTNAME} ${SERVICE_DESCRIPTION}
- check_command check_nrpe!${NRPE_CMD_NAME}
- servicegroups ${NAGIOS_SERVICEGROUP},
-}
diff --git a/charms/trusty/cassandra/testing/__init__.py b/charms/trusty/cassandra/testing/__init__.py
deleted file mode 100644
index b1b7fcd..0000000
--- a/charms/trusty/cassandra/testing/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/testing/amuletfixture.py b/charms/trusty/cassandra/testing/amuletfixture.py
deleted file mode 100644
index 988267f..0000000
--- a/charms/trusty/cassandra/testing/amuletfixture.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from functools import wraps
-import json
-import os
-import shutil
-import subprocess
-import tempfile
-import time
-
-import amulet
-import yaml
-
-
-class AmuletFixture(amulet.Deployment):
- def __init__(self, series, verbose=False):
- if verbose:
- super(AmuletFixture, self).__init__(series=series)
- else:
- # We use a wrapper around juju-deployer so we can fix how it is
- # invoked. In particular, turn off all the noise so we can
- # actually read our test output.
- juju_deployer = os.path.abspath(os.path.join(
- os.path.dirname(__file__), os.pardir, 'lib',
- 'juju-deployer-wrapper.py'))
- super(AmuletFixture, self).__init__(series=series,
- juju_deployer=juju_deployer)
- assert self.series == series
-
- def setUp(self):
- self._temp_dirs = []
-
- self.reset_environment(force=True)
-
- # Repackage our charm to a temporary directory, allowing us
- # to strip our virtualenv symlinks that would otherwise cause
- # juju to abort. We also strip the .bzr directory, working
- # around Bug #1394078.
- self.repackage_charm()
-
- # Fix amulet.Deployment so it doesn't depend on environment
- # variables or the current working directory, but rather the
- # environment we have introspected.
- with open(os.path.join(self.charm_dir, 'metadata.yaml'), 'r') as s:
- self.charm_name = yaml.safe_load(s)['name']
- self.charm_cache.test_charm = None
- self.charm_cache.fetch(self.charm_name, self.charm_dir,
- series=self.series)
-
- # Explicitly reset $JUJU_REPOSITORY to ensure amulet and
- # juju-deployer does not mess with the real one, per Bug #1393792
- self.org_repo = os.environ.get('JUJU_REPOSITORY', None)
- temp_repo = tempfile.mkdtemp(suffix='.repo')
- self._temp_dirs.append(temp_repo)
- os.environ['JUJU_REPOSITORY'] = temp_repo
- os.makedirs(os.path.join(temp_repo, self.series), mode=0o700)
-
- def tearDown(self, reset_environment=True):
- if reset_environment:
- self.reset_environment()
- if self.org_repo is None:
- del os.environ['JUJU_REPOSITORY']
- else:
- os.environ['JUJU_REPOSITORY'] = self.org_repo
-
- def deploy(self, timeout=None):
- '''Deploying or updating the configured system.
-
- Invokes amulet.Deployer.setup with a nicer name and standard
- timeout handling.
- '''
- if timeout is None:
- timeout = int(os.environ.get('AMULET_TIMEOUT', 900))
-
- # juju-deployer is buried under here, and has race conditions.
- # Sleep a bit before invoking it, so its cached view of the
- # environment matches reality.
- time.sleep(15)
-
- # If setUp fails, tearDown is never called leaving the
- # environment setup. This is useful for debugging.
- self.setup(timeout=timeout)
- self.wait(timeout=timeout)
-
- def __del__(self):
- for temp_dir in self._temp_dirs:
- if os.path.exists(temp_dir):
- shutil.rmtree(temp_dir, ignore_errors=True)
-
- def get_status(self):
- try:
- raw = subprocess.check_output(['juju', 'status', '--format=json'],
- universal_newlines=True)
- except subprocess.CalledProcessError as x:
- print(x.output)
- raise
- if raw:
- return json.loads(raw)
- return None
-
- def wait(self, timeout=None):
- '''Wait until the environment has reached a stable state.'''
- if timeout is None:
- timeout = int(os.environ.get('AMULET_TIMEOUT', 900))
- cmd = ['timeout', str(timeout), 'juju', 'wait', '-q']
- try:
- subprocess.check_output(cmd, universal_newlines=True)
- except subprocess.CalledProcessError as x:
- print(x.output)
- raise
-
- def reset_environment(self, force=False):
- if force:
- status = self.get_status()
- machines = [m for m in status.get('machines', {}).keys()
- if m != '0']
- if machines:
- subprocess.call(['juju', 'destroy-machine',
- '--force'] + machines,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL)
- fails = dict()
- while True:
- status = self.get_status()
- service_items = status.get('services', {}).items()
- if not service_items:
- break
- for service_name, service in service_items:
- if service.get('life', '') not in ('dying', 'dead'):
- subprocess.call(['juju', 'destroy-service', service_name],
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- for unit_name, unit in service.get('units', {}).items():
- if unit.get('agent-state', None) == 'error':
- if force:
- # If any units have failed hooks, unstick them.
- # This should no longer happen now we are
- # using the 'destroy-machine --force' command
- # earlier.
- try:
- subprocess.check_output(
- ['juju', 'resolved', unit_name],
- stderr=subprocess.STDOUT)
- except subprocess.CalledProcessError:
- # A previous 'resolved' call make cause a
- # subsequent one to fail if it is still
- # being processed. However, we need to keep
- # retrying because after a successful
- # resolution a subsequent hook may cause an
- # error state.
- pass
- else:
- fails[unit_name] = unit
- time.sleep(1)
-
- harvest_machines = []
- for machine, state in status.get('machines', {}).items():
- if machine != "0" and state.get('life') not in ('dying', 'dead'):
- harvest_machines.append(machine)
-
- if harvest_machines:
- cmd = ['juju', 'remove-machine', '--force'] + harvest_machines
- subprocess.check_output(cmd, stderr=subprocess.STDOUT)
-
- if fails:
- raise Exception("Teardown failed", fails)
-
- def repackage_charm(self):
- """Mirror the charm into a staging area.
-
- We do this to work around issues with Amulet, juju-deployer
- and juju. In particular:
- - symlinks in the Python virtual env pointing outside of the
- charm directory.
- - odd bzr interactions, such as tests being run on the committed
- version of the charm, rather than the working tree.
-
- Returns the test charm directory.
- """
- # Find the charm_dir we are testing
- src_charm_dir = os.path.dirname(__file__)
- while True:
- if os.path.exists(os.path.join(src_charm_dir,
- 'metadata.yaml')):
- break
- assert src_charm_dir != os.sep, 'metadata.yaml not found'
- src_charm_dir = os.path.abspath(os.path.join(src_charm_dir,
- os.pardir))
-
- with open(os.path.join(src_charm_dir, 'metadata.yaml'), 'r') as s:
- self.charm_name = yaml.safe_load(s)['name']
-
- repack_root = tempfile.mkdtemp(suffix='.charm')
- self._temp_dirs.append(repack_root)
- # juju-deployer now requires the series in the path when
- # deploying from an absolute path.
- repack_root = os.path.join(repack_root, self.series)
- os.makedirs(repack_root, mode=0o700)
-
- self.charm_dir = os.path.join(repack_root, self.charm_name)
-
- # Ignore .bzr to work around weird bzr interactions with
- # juju-deployer, per Bug #1394078, and ignore .venv
- # due to a) it containing symlinks juju will reject and b) to avoid
- # infinite recursion.
- shutil.copytree(src_charm_dir, self.charm_dir, symlinks=True,
- ignore=shutil.ignore_patterns('.venv?', '.bzr'))
-
-
-# Bug #1417097 means we need to monkey patch Amulet for now.
-real_juju = amulet.helpers.juju
-
-
-@wraps(real_juju)
-def patched_juju(args, env=None):
- args = [str(a) for a in args]
- return real_juju(args, env)
-
-amulet.helpers.juju = patched_juju
-amulet.deployer.juju = patched_juju
diff --git a/charms/trusty/cassandra/testing/mocks.py b/charms/trusty/cassandra/testing/mocks.py
deleted file mode 100644
index 7d03f23..0000000
--- a/charms/trusty/cassandra/testing/mocks.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-'''
-charm-helpers mocks.
-'''
-import os.path
-import shutil
-import tempfile
-from unittest.mock import patch
-
-import yaml
-
-from charmhelpers import fetch
-from charmhelpers.core import hookenv
-
-
-CHARM_DIR = os.path.abspath(os.path.join(
- os.path.dirname(__file__), os.pardir))
-
-
-def mock_charmhelpers(test_case):
- hookenv.cache.clear() # Clear the hookenv memorisation.
-
- mocks = []
-
- # Mock environment
- charm_dir = tempfile.TemporaryDirectory()
- test_case.addCleanup(charm_dir.cleanup)
- mock_env = patch.dict(os.environ, dict(CHARM_DIR=charm_dir.name))
- mock_env.start()
- test_case.addCleanup(mock_env.stop)
- shutil.copyfile(os.path.join(CHARM_DIR, 'metadata.yaml'),
- os.path.join(charm_dir.name, 'metadata.yaml'))
-
- # Mock config.
- # Set items:
- # hookenv.config()['foo'] = 'bar'
- # Reset 'previous' state:
- # hookenv.config().save();
- # hookenv.config().load_previous()
- config = hookenv.Config()
- tmp = tempfile.NamedTemporaryFile(suffix='.config')
- config.CONFIG_FILE_NAME = tmp.name
- test_case.addCleanup(tmp.close)
- with open(os.path.join(CHARM_DIR, 'config.yaml'), 'rb') as f:
- defaults = yaml.safe_load(f)['options']
- for k, v in defaults.items():
- opt_type = v.get('type', 'string')
- opt_val = v.get('default', None)
- if opt_val is None:
- config[k] = None
- elif opt_type == 'string':
- config[k] = str(opt_val)
- elif opt_type == 'int':
- config[k] = int(opt_val)
- elif opt_type == 'boolean':
- config[k] = bool(opt_val)
-
- def mock_config(scope=None):
- if scope is None:
- return config
- return config.get(scope, None)
- mocks.append(patch('charmhelpers.core.hookenv.config',
- side_effect=mock_config, autospec=True))
-
- # Magic mocks.
- methods = [
- 'charmhelpers.core.hookenv.log',
- 'charmhelpers.core.hookenv.hook_name',
- 'charmhelpers.core.hookenv.related_units',
- 'charmhelpers.core.hookenv.relation_get',
- 'charmhelpers.core.hookenv.relation_set',
- 'charmhelpers.core.hookenv.relation_ids',
- 'charmhelpers.core.hookenv.relation_type',
- 'charmhelpers.core.hookenv.service_name',
- 'charmhelpers.core.hookenv.local_unit',
- 'charmhelpers.core.hookenv.unit_private_ip',
- 'charmhelpers.core.hookenv.unit_public_ip',
- 'charmhelpers.core.host.log',
- 'charmhelpers.fetch.filter_installed_packages',
- 'os.chown', 'os.fchown',
- ]
- for m in methods:
- mocks.append(patch(m, autospec=True))
-
- for mock in mocks:
- mock.start()
- test_case.addCleanup(mock.stop)
-
- hookenv.local_unit.return_value = 'service/1'
-
- def mock_unit_private_ip():
- return '10.20.0.{}'.format(hookenv.local_unit().split('/')[-1])
- hookenv.unit_private_ip.side_effect = mock_unit_private_ip
-
- def mock_unit_public_ip():
- return '10.30.0.{}'.format(hookenv.local_unit().split('/')[-1])
- hookenv.unit_public_ip.side_effect = mock_unit_public_ip
-
- def mock_service_name():
- return hookenv.local_unit().split('/')[0]
- hookenv.service_name.side_effect = mock_service_name
-
- hookenv.relation_ids.side_effect = (
- lambda x: ['{}:1'.format(x)] if x else [])
- hookenv.related_units.return_value = ('service/2', 'service/3')
-
- relinfos = dict()
-
- def mock_relation_set(relation_id=None, relation_settings=None, **kwargs):
- if relation_id is None:
- relation_id = hookenv.relation_id()
- unit = hookenv.local_unit()
- relinfo = mock_relation_get(unit=unit, rid=relation_id)
- if relation_settings is not None:
- relinfo.update(relation_settings)
- relinfo.update(kwargs)
- return None
- hookenv.relation_set.side_effect = mock_relation_set
-
- def mock_relation_get(attribute=None, unit=None, rid=None):
- if rid is None:
- rid = hookenv.relation_id()
- if unit is None:
- unit = hookenv.remove_unit()
- service, unit_num = unit.split('/')
- unit_num = int(unit_num)
- relinfos.setdefault(rid, {})
- relinfos[rid].setdefault(
- unit, {'private-address': '10.20.0.{}'.format(unit_num)})
- relinfo = relinfos[rid][unit]
- if attribute is None or attribute == '-':
- return relinfo
- return relinfo.get(attribute)
- hookenv.relation_get.side_effect = mock_relation_get
-
- def mock_chown(target, uid, gid):
- assert uid == 0
- assert gid == 0
- assert os.path.exists(target)
- os.chown.side_effect = mock_chown
-
- def mock_fchown(fd, uid, gid):
- assert uid == 0
- assert gid == 0
- os.fchown.side_effect = mock_fchown
-
- fetch.filter_installed_packages.side_effect = lambda pkgs: list(pkgs)
-
- def mock_relation_for_unit(unit=None, rid=None):
- if unit is None:
- unit = hookenv.remote_unit()
- service, unit_num = unit.split('/')
- unit_num = int(unit_num)
- return {'private-address': '10.20.0.{}'.format(unit_num)}
- hookenv.relation_for_unit.side_effect = mock_relation_for_unit
-
- def mock_chown(target, uid, gid):
- assert uid == 0
- assert gid == 0
- assert os.path.exists(target)
- os.chown.side_effect = mock_chown
-
- def mock_fchown(fd, uid, gid):
- assert uid == 0
- assert gid == 0
- os.fchown.side_effect = mock_fchown
-
- fetch.filter_installed_packages.side_effect = lambda pkgs: list(pkgs)
diff --git a/charms/trusty/cassandra/tests/__init__.py b/charms/trusty/cassandra/tests/__init__.py
deleted file mode 100644
index b1b7fcd..0000000
--- a/charms/trusty/cassandra/tests/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/cassandra/tests/base.py b/charms/trusty/cassandra/tests/base.py
deleted file mode 100755
index d308985..0000000
--- a/charms/trusty/cassandra/tests/base.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import functools
-from itertools import count
-import unittest
-from unittest.mock import patch
-
-from testing.mocks import mock_charmhelpers
-
-patch = functools.partial(patch, autospec=True) # autospec by default.
-
-
-class TestCaseBase(unittest.TestCase):
- def setUp(self):
- super(TestCaseBase, self).setUp()
-
- mock_charmhelpers(self)
-
- is_lxc = patch('helpers.is_lxc', return_value=False)
- is_lxc.start()
- self.addCleanup(is_lxc.stop)
-
- emit = patch('helpers.emit')
- emit.start()
- self.addCleanup(emit.stop)
-
- time = patch('time.time', side_effect=count(1))
- time.start()
- self.addCleanup(time.stop)
diff --git a/charms/trusty/cassandra/tests/test_actions.py b/charms/trusty/cassandra/tests/test_actions.py
deleted file mode 100755
index f97df0c..0000000
--- a/charms/trusty/cassandra/tests/test_actions.py
+++ /dev/null
@@ -1,1156 +0,0 @@
-#!.venv3/bin/python3
-
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-from itertools import repeat
-import os.path
-import re
-import shutil
-import subprocess
-import tempfile
-from textwrap import dedent
-import unittest
-from unittest.mock import ANY, call, patch, sentinel
-import yaml
-
-import cassandra
-from charmhelpers.core import hookenv
-
-from tests.base import TestCaseBase
-import actions
-from coordinator import coordinator
-import helpers
-
-
-class TestActions(TestCaseBase):
- def test_action_wrapper(self):
- @actions.action
- def somefunc(*args, **kw):
- return 42, args, kw
-
- hookenv.hook_name.return_value = 'catch-fire'
-
- # The wrapper stripts the servicename argument, which we have no
- # use for, logs a message and invokes the wrapped function.
- hookenv.remote_unit.return_value = None
- self.assertEqual(somefunc('sn', 1, foo=4), (42, (1,), dict(foo=4)))
- hookenv.log.assert_called_once_with('** Action catch-fire/somefunc')
-
- # Different log message if there is a remote unit.
- hookenv.log.reset_mock()
- os.environ['JUJU_REMOTE_UNIT'] = 'foo'
- self.assertEqual(somefunc('sn', 1, foo=4), (42, (1,), dict(foo=4)))
- hookenv.log.assert_called_once_with(
- '** Action catch-fire/somefunc (foo)')
-
- def test_revert_unchangeable_config(self):
- config = hookenv.config()
-
- self.assertIn('datacenter', actions.UNCHANGEABLE_KEYS)
-
- # In the first hook, revert does nothing as there is nothing to
- # revert too.
- config['datacenter'] = 'mission_control'
- self.assertTrue(config.changed('datacenter'))
- actions.revert_unchangeable_config('')
- self.assertEqual(config['datacenter'], 'mission_control')
-
- config.save()
- config.load_previous()
- config['datacenter'] = 'orbital_1'
-
- actions.revert_unchangeable_config('')
- self.assertEqual(config['datacenter'], 'mission_control') # Reverted
-
- hookenv.log.assert_any_call(ANY, hookenv.ERROR) # Logged the problem.
-
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_leader_only(self, is_leader):
-
- @actions.leader_only
- def f(*args, **kw):
- return args, kw
-
- is_leader.return_value = False
- self.assertIsNone(f(1, foo='bar'))
-
- is_leader.return_value = True
- self.assertEqual(f(1, foo='bar'), ((1,), dict(foo='bar')))
-
- def test_set_proxy(self):
- # NB. Environment is already mocked.
- os.environ['http_proxy'] = ''
- os.environ['https_proxy'] = ''
- actions.set_proxy('')
- self.assertEqual(os.environ['http_proxy'], '')
- self.assertEqual(os.environ['https_proxy'], '')
- hookenv.config()['http_proxy'] = 'foo'
- actions.set_proxy('')
- self.assertEqual(os.environ['http_proxy'], 'foo')
- self.assertEqual(os.environ['https_proxy'], 'foo')
-
- @patch('subprocess.check_call')
- def test_preinstall(self, check_call):
- # Noop if there are no preinstall hooks found running the
- # install hook.
- hookenv.hook_name.return_value = 'install'
- actions.preinstall('')
- self.assertFalse(check_call.called)
- hookenv.log.assert_any_call('No preinstall hooks found')
-
- # If preinstall hooks are found running the install hook,
- # the preinstall hooks are run.
- hook_dirs = []
- hook_files = []
- for i in range(1, 3):
- hook_dirs.append(os.path.join(hookenv.charm_dir(),
- 'exec.d', str(i)))
- hook_files.append(os.path.join(hook_dirs[-1], 'charm-pre-install'))
-
- os.makedirs(hook_dirs[-1])
- with open(hook_files[-1], 'w') as f1:
- print('mocked', file=f1)
- os.chmod(hook_files[-1], 0o755)
-
- check_call.reset_mock()
- actions.preinstall('')
-
- calls = [call(['sh', '-c', f2]) for f2 in hook_files]
- check_call.assert_has_calls(calls)
-
- # If a preinstall hook is not executable, a warning is raised.
- hook_dir = os.path.join(hookenv.charm_dir(), 'exec.d', '55')
- hook_file = os.path.join(hook_dir, 'charm-pre-install')
- os.makedirs(hook_dir)
- with open(hook_file, 'w') as f1:
- print('whoops', file=f1)
- os.chmod(hook_file, 0o644)
- check_call.reset_mock()
- hookenv.log.reset_mock()
- actions.preinstall('')
- check_call.assert_has_calls(calls) # Only previous hooks run.
- hookenv.log.assert_has_calls([
- call(ANY),
- call(ANY),
- call(ANY, hookenv.WARNING)])
-
- # Nothing happens if the install hook is not being run.
- hookenv.hook_name.return_value = 'config-changed'
- check_call.reset_mock()
- actions.preinstall('')
- self.assertFalse(check_call.called)
-
- @patch('subprocess.check_call')
- def test_swapoff(self, check_call):
- fstab = (
- b'UUID=abc / ext4 errors=remount-ro 0 1\n'
- b'/dev/mapper/cryptswap1 none swap sw 0 0')
- with tempfile.NamedTemporaryFile() as f:
- f.write(fstab)
- f.flush()
- actions.swapoff('', f.name)
- f.seek(0)
- self.assertTrue(b'swap' not in f.read())
-
- check_call.assert_called_once_with(['swapoff', '-a'])
-
- @patch('subprocess.check_call')
- def test_swapoff_fails(self, check_call):
- check_call.side_effect = RuntimeError()
- actions.swapoff('', '')
- # A warning is generated if swapoff fails.
- hookenv.log.assert_any_call(ANY, hookenv.WARNING)
-
- @patch('subprocess.check_call')
- def test_swapoff_lxc(self, check_call):
- # Under LXC, the swapoff action does nothing except log.
- helpers.is_lxc.return_value = True
- actions.swapoff('')
- self.assertFalse(check_call.called)
-
- @patch('charmhelpers.fetch.configure_sources')
- def test_configure_sources(self, configure_sources):
- config = hookenv.config()
-
- # fetch.configure_sources called the first time
- actions.configure_sources('')
- configure_sources.assert_called_once_with(True)
-
- # fetch.configure_sources not called if relevant config is unchanged.
- config.save()
- config.load_previous()
- configure_sources.reset_mock()
- actions.configure_sources('')
- self.assertFalse(configure_sources.called)
-
- # Changing install_sources causes fetch.configure_sources to be
- # called.
- config.save()
- config.load_previous()
- configure_sources.reset_mock()
- config['install_sources'] = 'foo'
- actions.configure_sources('')
- configure_sources.assert_called_once_with(True)
-
- # Changing install_keys causes fetch.configure_sources to be
- # called.
- config.save()
- config.load_previous()
- configure_sources.reset_mock()
- config['install_keys'] = 'foo'
- actions.configure_sources('')
- configure_sources.assert_called_once_with(True)
-
- @patch('charmhelpers.core.hookenv.charm_dir')
- @patch('subprocess.check_call')
- def test_add_implicit_package_signing_keys(self, check_call, charm_dir):
- charm_dir.return_value = os.path.join(os.path.dirname(__file__),
- os.pardir)
- actions.add_implicit_package_signing_keys('')
-
- keys = ['apache', 'datastax']
-
- self.assertEqual(check_call.call_count, len(keys))
-
- for k in keys:
- with self.subTest(key=k):
- path = os.path.join(hookenv.charm_dir(),
- 'lib', '{}.key'.format(k))
- self.assertTrue(os.path.exists(path))
- check_call.assert_any_call(['apt-key', 'add', path],
- stdin=subprocess.DEVNULL)
-
- @patch('charmhelpers.core.host.write_file')
- @patch('subprocess.check_call')
- def test_reset_sysctl(self, check_call, write_file):
- actions.reset_sysctl('')
-
- ctl_file = '/etc/sysctl.d/99-cassandra.conf'
- # Magic value per Cassandra best practice.
- write_file.assert_called_once_with(ctl_file,
- b"vm.max_map_count = 131072\n")
- check_call.assert_called_once_with(['sysctl', '-p',
- '/etc/sysctl.d/99-cassandra.conf'])
-
- @patch('subprocess.check_call')
- @patch('charmhelpers.core.host.write_file')
- def test_reset_sysctl_expected_fails(self, write_file, check_call):
- check_call.side_effect = repeat(OSError(errno.EACCES,
- 'Permission Denied'))
- actions.reset_sysctl('')
- # A warning is generated if permission denied was raised.
- hookenv.log.assert_any_call(ANY, hookenv.WARNING)
-
- @patch('subprocess.check_call')
- @patch('charmhelpers.core.host.write_file')
- def test_reset_sysctl_fails_badly(self, write_file, check_call):
- # Other OSErrors are reraised since we don't know how to handle
- # them.
- check_call.side_effect = repeat(OSError(errno.EFAULT, 'Whoops'))
- self.assertRaises(OSError, actions.reset_sysctl, '')
-
- @patch('subprocess.check_call')
- def test_reset_sysctl_lxc(self, check_call):
- helpers.is_lxc.return_value = True
- actions.reset_sysctl('')
- self.assertFalse(check_call.called)
- hookenv.log.assert_any_call('In an LXC. '
- 'Leaving sysctl unchanged.')
-
- @patch('helpers.get_cassandra_packages')
- @patch('helpers.ensure_package_status')
- def test_ensure_cassandra_package_status(self, ensure_package_status,
- get_cassandra_packages):
- get_cassandra_packages.return_value = sentinel.cassandra_packages
- actions.ensure_cassandra_package_status('')
- ensure_package_status.assert_called_once_with(
- sentinel.cassandra_packages)
-
- @patch('subprocess.check_call')
- @patch('helpers.get_jre')
- @patch('helpers.get_cassandra_packages')
- @patch('helpers.install_packages')
- def test_install_cassandra_packages(self, install_packages,
- get_cassandra_packages,
- get_jre, check_call):
- get_cassandra_packages.return_value = sentinel.cassandra_packages
- get_jre.return_value = 'openjdk'
- actions.install_cassandra_packages('')
- install_packages.assert_called_once_with(sentinel.cassandra_packages)
- check_call.assert_called_once_with(['update-java-alternatives',
- '--jre-headless', '--set',
- 'java-1.8.0-openjdk-amd64'])
-
- @patch('subprocess.check_call')
- @patch('helpers.get_jre')
- @patch('helpers.get_cassandra_packages')
- @patch('helpers.install_packages')
- def test_install_cassandra_packages_oracle(self, install_packages,
- get_cassandra_packages,
- get_jre, check_call):
- get_cassandra_packages.return_value = sentinel.cassandra_packages
- get_jre.return_value = 'oracle'
- actions.install_cassandra_packages('')
- install_packages.assert_called_once_with(sentinel.cassandra_packages)
- # No alternatives selected, as the Oracle JRE installer method
- # handles this.
- self.assertFalse(check_call.called)
-
- @patch('actions._install_oracle_jre_tarball')
- @patch('actions._fetch_oracle_jre')
- def test_install_oracle_jre(self, fetch, install_tarball):
- fetch.return_value = sentinel.tarball
-
- actions.install_oracle_jre('')
- self.assertFalse(fetch.called)
- self.assertFalse(install_tarball.called)
-
- hookenv.config()['jre'] = 'oracle'
- actions.install_oracle_jre('')
- fetch.assert_called_once_with()
- install_tarball.assert_called_once_with(sentinel.tarball)
-
- @patch('helpers.status_set')
- @patch('urllib.request')
- def test_fetch_oracle_jre(self, req, status_set):
- config = hookenv.config()
- url = 'https://foo.example.com/server-jre-7u42-linux-x64.tar.gz'
- expected_tarball = os.path.join(hookenv.charm_dir(), 'lib',
- 'server-jre-7u42-linux-x64.tar.gz')
- config['private_jre_url'] = url
-
- # Create a dummy tarball, since the mock urlretrieve won't.
- os.makedirs(os.path.dirname(expected_tarball))
- with open(expected_tarball, 'w'):
- pass # Empty file
-
- self.assertEqual(actions._fetch_oracle_jre(), expected_tarball)
- req.urlretrieve.assert_called_once_with(url, expected_tarball)
-
- def test_fetch_oracle_jre_local(self):
- # Create an existing tarball. If it is found, it will be used
- # without needing to specify a remote url or actually download
- # anything.
- expected_tarball = os.path.join(hookenv.charm_dir(), 'lib',
- 'server-jre-7u42-linux-x64.tar.gz')
- os.makedirs(os.path.dirname(expected_tarball))
- with open(expected_tarball, 'w'):
- pass # Empty file
-
- self.assertEqual(actions._fetch_oracle_jre(), expected_tarball)
-
- @patch('helpers.status_set')
- def test_fetch_oracle_jre_notfound(self, status_set):
- with self.assertRaises(SystemExit) as x:
- actions._fetch_oracle_jre()
- self.assertEqual(x.code, 0)
- status_set.assert_called_once_with('blocked', ANY)
-
- @patch('subprocess.check_call')
- @patch('charmhelpers.core.host.mkdir')
- @patch('os.path.isdir')
- def test_install_oracle_jre_tarball(self, isdir, mkdir, check_call):
- isdir.return_value = False
-
- dest = '/usr/lib/jvm/java-8-oracle'
-
- actions._install_oracle_jre_tarball(sentinel.tarball)
- mkdir.assert_called_once_with(dest)
- check_call.assert_has_calls([
- call(['tar', '-xz', '-C', dest,
- '--strip-components=1', '-f', sentinel.tarball]),
- call(['update-alternatives', '--install',
- '/usr/bin/java', 'java',
- os.path.join(dest, 'bin', 'java'), '1']),
- call(['update-alternatives', '--set', 'java',
- os.path.join(dest, 'bin', 'java')]),
- call(['update-alternatives', '--install',
- '/usr/bin/javac', 'javac',
- os.path.join(dest, 'bin', 'javac'), '1']),
- call(['update-alternatives', '--set', 'javac',
- os.path.join(dest, 'bin', 'javac')])])
-
- @patch('os.path.exists')
- @patch('subprocess.check_call')
- @patch('charmhelpers.core.host.mkdir')
- @patch('os.path.isdir')
- def test_install_oracle_jre_tarball_already(self, isdir,
- mkdir, check_call, exists):
- isdir.return_value = True
- exists.return_value = True # jre already installed
-
- # Store the version previously installed.
- hookenv.config()['oracle_jre_tarball'] = sentinel.tarball
-
- dest = '/usr/lib/jvm/java-8-oracle'
-
- actions._install_oracle_jre_tarball(sentinel.tarball)
-
- self.assertFalse(mkdir.called) # The jvm dir already existed.
-
- exists.assert_called_once_with('/usr/lib/jvm/java-8-oracle/bin/java')
-
- # update-alternatives done, but tarball not extracted.
- check_call.assert_has_calls([
- call(['update-alternatives', '--install',
- '/usr/bin/java', 'java',
- os.path.join(dest, 'bin', 'java'), '1']),
- call(['update-alternatives', '--set', 'java',
- os.path.join(dest, 'bin', 'java')]),
- call(['update-alternatives', '--install',
- '/usr/bin/javac', 'javac',
- os.path.join(dest, 'bin', 'javac'), '1']),
- call(['update-alternatives', '--set', 'javac',
- os.path.join(dest, 'bin', 'javac')])])
-
- @patch('subprocess.check_output')
- def test_emit_java_version(self, check_output):
- check_output.return_value = 'Line 1\nLine 2'
- actions.emit_java_version('')
- check_output.assert_called_once_with(['java', '-version'],
- universal_newlines=True)
- hookenv.log.assert_has_calls([call(ANY),
- call('JRE: Line 1'),
- call('JRE: Line 2')])
-
- @patch('helpers.configure_cassandra_yaml')
- def test_configure_cassandra_yaml(self, configure_cassandra_yaml):
- # actions.configure_cassandra_yaml is just a wrapper around the
- # helper.
- actions.configure_cassandra_yaml('')
- configure_cassandra_yaml.assert_called_once_with()
-
- @patch('helpers.get_cassandra_env_file')
- @patch('charmhelpers.core.host.write_file')
- def test_configure_cassandra_env(self, write_file, env_file):
- def _wf(path, contents, perms=None):
- with open(path, 'wb') as f:
- f.write(contents)
- write_file.side_effect = _wf
-
- # cassandra-env.sh is a shell script that unfortunately
- # embeds configuration we need to change.
- existing_config = dedent('''\
- Everything is ignored
- unless a regexp matches
- #MAX_HEAP_SIZE="1G"
- #HEAP_NEWSIZE="800M"
- #JMX_PORT="1234"
- And done
- ''')
-
- with tempfile.TemporaryDirectory() as tempdir:
- cassandra_env = os.path.join(tempdir, 'c.sh')
- env_file.return_value = cassandra_env
-
- with open(cassandra_env, 'w', encoding='UTF-8') as f:
- f.write(existing_config)
-
- overrides = dict(
- max_heap_size=re.compile('^MAX_HEAP_SIZE=(.*)$', re.M),
- heap_newsize=re.compile('^HEAP_NEWSIZE=(.*)$', re.M))
-
- for key in overrides:
- hookenv.config()[key] = ''
-
- # By default, the settings will be commented out.
- actions.configure_cassandra_env('')
- with open(cassandra_env, 'r', encoding='UTF-8') as f:
- generated_env = f.read()
- for config_key, regexp in overrides.items():
- with self.subTest(override=config_key):
- self.assertIsNone(regexp.search(generated_env))
-
- # Settings can be overridden.
- for config_key, regexp in overrides.items():
- hookenv.config()[config_key] = '{} val'.format(config_key)
- actions.configure_cassandra_env('')
- with open(cassandra_env, 'r') as f:
- generated_env = f.read()
- for config_key, regexp in overrides.items():
- with self.subTest(override=config_key):
- match = regexp.search(generated_env)
- self.assertIsNotNone(match)
- # Note the value has been shell quoted.
- self.assertTrue(
- match.group(1).startswith(
- "'{} val'".format(config_key)))
-
- # Settings can be returned to the defaults.
- for config_key, regexp in overrides.items():
- hookenv.config()[config_key] = ''
- actions.configure_cassandra_env('')
- with open(cassandra_env, 'r', encoding='UTF-8') as f:
- generated_env = f.read()
- for config_key, regexp in overrides.items():
- with self.subTest(override=config_key):
- self.assertIsNone(regexp.search(generated_env))
-
- @patch('helpers.get_cassandra_rackdc_file')
- def test_configure_cassandra_rackdc(self, rackdc_file):
- hookenv.config()['datacenter'] = 'test_dc'
- hookenv.config()['rack'] = 'test_rack'
- with tempfile.NamedTemporaryFile() as rackdc:
- rackdc_file.return_value = rackdc.name
- actions.configure_cassandra_rackdc('')
- with open(rackdc.name, 'r') as f:
- self.assertEqual(f.read().strip(),
- 'dc=test_dc\nrack=test_rack')
-
- @patch('helpers.connect')
- @patch('helpers.get_auth_keyspace_replication')
- @patch('helpers.num_nodes')
- def test_needs_reset_auth_keyspace_replication(self, num_nodes,
- get_auth_ks_rep,
- connect):
- num_nodes.return_value = 4
- connect().__enter__.return_value = sentinel.session
- connect().__exit__.return_value = False
- get_auth_ks_rep.return_value = {'another': '8'}
- self.assertTrue(actions.needs_reset_auth_keyspace_replication())
-
- @patch('helpers.connect')
- @patch('helpers.get_auth_keyspace_replication')
- @patch('helpers.num_nodes')
- def test_needs_reset_auth_keyspace_replication_false(self, num_nodes,
- get_auth_ks_rep,
- connect):
- config = hookenv.config()
- config['datacenter'] = 'mydc'
- connect().__enter__.return_value = sentinel.session
- connect().__exit__.return_value = False
-
- num_nodes.return_value = 3
- get_auth_ks_rep.return_value = {'another': '8',
- 'mydc': '3'}
- self.assertFalse(actions.needs_reset_auth_keyspace_replication())
-
- @patch('helpers.set_active')
- @patch('helpers.repair_auth_keyspace')
- @patch('helpers.connect')
- @patch('helpers.set_auth_keyspace_replication')
- @patch('helpers.get_auth_keyspace_replication')
- @patch('helpers.num_nodes')
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_reset_auth_keyspace_replication(self, is_leader, num_nodes,
- get_auth_ks_rep,
- set_auth_ks_rep,
- connect, repair, set_active):
- is_leader.return_value = True
- num_nodes.return_value = 4
- coordinator.grants = {}
- coordinator.requests = {hookenv.local_unit(): {}}
- coordinator.grant('repair', hookenv.local_unit())
- config = hookenv.config()
- config['datacenter'] = 'mydc'
- connect().__enter__.return_value = sentinel.session
- connect().__exit__.return_value = False
- get_auth_ks_rep.return_value = {'another': '8'}
- self.assertTrue(actions.needs_reset_auth_keyspace_replication())
- actions.reset_auth_keyspace_replication('')
- set_auth_ks_rep.assert_called_once_with(
- sentinel.session,
- {'class': 'NetworkTopologyStrategy', 'another': '8', 'mydc': 4})
- repair.assert_called_once_with()
- set_active.assert_called_once_with()
-
- def test_store_unit_private_ip(self):
- hookenv.unit_private_ip.side_effect = None
- hookenv.unit_private_ip.return_value = sentinel.ip
- actions.store_unit_private_ip('')
- self.assertEqual(hookenv.config()['unit_private_ip'], sentinel.ip)
-
- @patch('charmhelpers.core.host.service_start')
- @patch('helpers.status_set')
- @patch('helpers.actual_seed_ips')
- @patch('helpers.get_seed_ips')
- @patch('relations.StorageRelation.needs_remount')
- @patch('helpers.is_bootstrapped')
- @patch('helpers.is_cassandra_running')
- @patch('helpers.is_decommissioned')
- def test_needs_restart(self, is_decom, is_running, is_bootstrapped,
- needs_remount, seed_ips, actual_seeds,
- status_set, service_start):
- is_decom.return_value = False
- is_running.return_value = True
- needs_remount.return_value = False
- seed_ips.return_value = set(['1.2.3.4'])
- actual_seeds.return_value = set(['1.2.3.4'])
-
- config = hookenv.config()
- config['configured_seeds'] = list(sorted(seed_ips()))
- config.save()
- config.load_previous() # Ensure everything flagged as unchanged.
-
- self.assertFalse(actions.needs_restart())
-
- # Decommissioned nodes are not restarted.
- is_decom.return_value = True
- self.assertFalse(actions.needs_restart())
- is_decom.return_value = False
- self.assertFalse(actions.needs_restart())
-
- # Nodes not running need to be restarted.
- is_running.return_value = False
- self.assertTrue(actions.needs_restart())
- is_running.return_value = True
- self.assertFalse(actions.needs_restart())
-
- # If we have a new mountpoint, we need to restart in order to
- # migrate data.
- needs_remount.return_value = True
- self.assertTrue(actions.needs_restart())
- needs_remount.return_value = False
- self.assertFalse(actions.needs_restart())
-
- # Certain changed config items trigger a restart.
- config['max_heap_size'] = '512M'
- self.assertTrue(actions.needs_restart())
- config.save()
- config.load_previous()
- self.assertFalse(actions.needs_restart())
-
- # A new IP address requires a restart.
- config['unit_private_ip'] = 'new'
- self.assertTrue(actions.needs_restart())
- config.save()
- config.load_previous()
- self.assertFalse(actions.needs_restart())
-
- # If the seeds have changed, we need to restart.
- seed_ips.return_value = set(['9.8.7.6'])
- actual_seeds.return_value = set(['9.8.7.6'])
- self.assertTrue(actions.needs_restart())
- is_running.side_effect = iter([False, True])
- helpers.start_cassandra()
- is_running.side_effect = None
- is_running.return_value = True
- self.assertFalse(actions.needs_restart())
-
- @patch('charmhelpers.core.hookenv.is_leader')
- @patch('helpers.is_bootstrapped')
- @patch('helpers.ensure_database_directories')
- @patch('helpers.remount_cassandra')
- @patch('helpers.start_cassandra')
- @patch('helpers.stop_cassandra')
- @patch('helpers.status_set')
- def test_maybe_restart(self, status_set, stop_cassandra, start_cassandra,
- remount, ensure_directories, is_bootstrapped,
- is_leader):
- coordinator.grants = {}
- coordinator.requests = {hookenv.local_unit(): {}}
- coordinator.relid = 'cluster:1'
- coordinator.grant('restart', hookenv.local_unit())
- actions.maybe_restart('')
- stop_cassandra.assert_called_once_with()
- remount.assert_called_once_with()
- ensure_directories.assert_called_once_with()
- start_cassandra.assert_called_once_with()
-
- @patch('helpers.stop_cassandra')
- def test_stop_cassandra(self, helpers_stop_cassandra):
- actions.stop_cassandra('ignored')
- helpers_stop_cassandra.assert_called_once_with()
-
- @patch('helpers.start_cassandra')
- def test_start_cassandra(self, helpers_start_cassandra):
- actions.start_cassandra('ignored')
- helpers_start_cassandra.assert_called_once_with()
-
- @patch('os.path.isdir')
- @patch('helpers.get_all_database_directories')
- @patch('helpers.set_io_scheduler')
- def test_reset_all_io_schedulers(self, set_io_scheduler, dbdirs, isdir):
- hookenv.config()['io_scheduler'] = sentinel.io_scheduler
- dbdirs.return_value = dict(
- data_file_directories=[sentinel.d1, sentinel.d2],
- commitlog_directory=sentinel.cl,
- saved_caches_directory=sentinel.sc)
- isdir.return_value = True
- actions.reset_all_io_schedulers('')
- set_io_scheduler.assert_has_calls([
- call(sentinel.io_scheduler, sentinel.d1),
- call(sentinel.io_scheduler, sentinel.d2),
- call(sentinel.io_scheduler, sentinel.cl),
- call(sentinel.io_scheduler, sentinel.sc)],
- any_order=True)
-
- # If directories don't exist yet, nothing happens.
- set_io_scheduler.reset_mock()
- isdir.return_value = False
- actions.reset_all_io_schedulers('')
- self.assertFalse(set_io_scheduler.called)
-
- def test_config_key_lists_complete(self):
- # Ensure that we have listed all keys in either
- # RESTART_REQUIRED_KEYS, RESTART_NOT_REQUIRED_KEYS or
- # UNCHANGEABLE_KEYS. This is to ensure that RESTART_REQUIRED_KEYS
- # is maintained as new config items are added over time.
- config_path = os.path.join(os.path.dirname(__file__), os.pardir,
- 'config.yaml')
- with open(config_path, 'r') as f:
- config = yaml.safe_load(f)
-
- combined = actions.RESTART_REQUIRED_KEYS.union(
- actions.RESTART_NOT_REQUIRED_KEYS).union(
- actions.UNCHANGEABLE_KEYS)
-
- for key in config['options']:
- with self.subTest(key=key):
- self.assertIn(key, combined)
-
- @patch('actions._publish_database_relation')
- def test_publish_database_relations(self, publish_db_rel):
- actions.publish_database_relations('')
- publish_db_rel.assert_called_once_with('database:1', superuser=False)
-
- @patch('actions._publish_database_relation')
- def test_publish_database_admin_relations(self, publish_db_rel):
- actions.publish_database_admin_relations('')
- publish_db_rel.assert_called_once_with('database-admin:1',
- superuser=True)
-
- @patch('helpers.leader_ping')
- @patch('helpers.ensure_user')
- @patch('helpers.connect')
- @patch('helpers.get_service_name')
- @patch('helpers.encrypt_password')
- @patch('charmhelpers.core.host.pwgen')
- @patch('charmhelpers.core.hookenv.is_leader')
- @patch('actions._client_credentials')
- def test_publish_database_relation_leader(self, client_creds, is_leader,
- pwgen, encrypt_password,
- get_service_name,
- connect, ensure_user,
- leader_ping):
- is_leader.return_value = True # We are the leader.
- client_creds.return_value = (None, None) # No creds published yet.
-
- get_service_name.return_value = 'cservice'
- pwgen.side_effect = iter(['secret1', 'secret2'])
- encrypt_password.side_effect = iter(['crypt1', 'crypt2'])
- connect().__enter__.return_value = sentinel.session
-
- config = hookenv.config()
- config['native_transport_port'] = 666
- config['rpc_port'] = 777
- config['cluster_name'] = 'fred'
- config['datacenter'] = 'mission_control'
- config['rack'] = '01'
-
- actions._publish_database_relation('database:1', superuser=False)
-
- ensure_user.assert_called_once_with(sentinel.session,
- 'juju_cservice', 'crypt1',
- False)
- leader_ping.assert_called_once_with() # Peers woken.
-
- hookenv.relation_set.assert_has_calls([
- call('database:1',
- username='juju_cservice', password='secret1',
- host='10.30.0.1', native_transport_port=666, rpc_port=777,
- cluster_name='fred', datacenter='mission_control',
- rack='01')])
-
- @patch('helpers.leader_ping')
- @patch('helpers.ensure_user')
- @patch('helpers.connect')
- @patch('helpers.get_service_name')
- @patch('helpers.encrypt_password')
- @patch('charmhelpers.core.host.pwgen')
- @patch('charmhelpers.core.hookenv.is_leader')
- @patch('actions._client_credentials')
- def test_publish_database_relation_super(self, client_creds, is_leader,
- pwgen, encrypt_password,
- get_service_name,
- connect, ensure_user,
- leader_ping):
- is_leader.return_value = True # We are the leader.
- client_creds.return_value = (None, None) # No creds published yet.
-
- get_service_name.return_value = 'cservice'
- pwgen.side_effect = iter(['secret1', 'secret2'])
- encrypt_password.side_effect = iter(['crypt1', 'crypt2'])
- connect().__enter__.return_value = sentinel.session
-
- config = hookenv.config()
- config['native_transport_port'] = 666
- config['rpc_port'] = 777
- config['cluster_name'] = 'fred'
- config['datacenter'] = 'mission_control'
- config['rack'] = '01'
-
- actions._publish_database_relation('database:1', superuser=True)
-
- ensure_user.assert_called_once_with(sentinel.session,
- 'juju_cservice_admin', 'crypt1',
- True)
-
- @patch('charmhelpers.core.host.write_file')
- def test_install_maintenance_crontab(self, write_file):
- # First 7 units get distributed, one job per day.
- hookenv.local_unit.return_value = 'foo/0'
- actions.install_maintenance_crontab('')
- write_file.assert_called_once_with('/etc/cron.d/cassandra-maintenance',
- ANY)
- contents = write_file.call_args[0][1]
- # Not the complete command, but includes all the expanded
- # variables.
- expected = (b'\n0 0 * * 0 cassandra run-one-until-success '
- b'nodetool repair -pr')
- self.assertIn(expected, contents)
-
- # Next 7 units distributed 12 hours out of sync with the first
- # batch.
- hookenv.local_unit.return_value = 'foo/8'
- actions.install_maintenance_crontab('')
- contents = write_file.call_args[0][1]
- expected = (b'\n0 12 * * 1 cassandra run-one-until-success '
- b'nodetool repair -pr')
- self.assertIn(expected, contents)
-
- # Later units per helpers.week_spread()
- hookenv.local_unit.return_value = 'foo/411'
- actions.install_maintenance_crontab('')
- contents = write_file.call_args[0][1]
- expected = (b'\n37 8 * * 5 cassandra run-one-until-success '
- b'nodetool repair -pr')
- self.assertIn(expected, contents)
-
- @patch('helpers.emit_netstats')
- @patch('helpers.emit_status')
- @patch('helpers.emit_describe_cluster')
- def test_emit_cluster_info(self, emit_desc, emit_status, emit_netstats):
- actions.emit_cluster_info('')
- emit_desc.assert_called_once_with()
- emit_status.assert_called_once_with()
- emit_netstats.assert_called_once_with()
-
- @patch('charmhelpers.core.hookenv.relations_of_type')
- @patch('actions.ufw')
- def test_configure_firewall(self, ufw, rel_of_type):
- rel_of_type.side_effect = iter([[{'private-address': '1.1.0.1'},
- {'private-address': '1.1.0.2'}],
- []])
- actions.configure_firewall('')
-
- # Confirm our mock provided the expected data.
- rel_of_type.assert_has_calls([call('cluster'), call('database-admin')])
-
- ufw.enable.assert_called_once_with(soft_fail=True) # Always enabled.
-
- # SSH and the client protocol ports are always fully open.
- ufw.service.assert_has_calls([call('ssh', 'open'),
- call('nrpe', 'open'),
- call('rsync', 'open'),
- call(9042, 'open'),
- call(9160, 'open')])
-
- # This test is running for the first time, so there are no
- # previously applied rules to remove. It opens necessary access
- # to peers and other related units. The 1.1.* addresses are
- # peers, and they get storage (7000), ssl_storage (7001),
- # JMX (7199), Thrift (9160) and native (9042). The remaining
- # addresses are clients, getting just Thrift and native.
- ufw.grant_access.assert_has_calls([call('1.1.0.1', 'any', 7000),
- call('1.1.0.1', 'any', 7001),
-
- call('1.1.0.2', 'any', 7000),
- call('1.1.0.2', 'any', 7001)],
- any_order=True)
-
- # If things change in a later hook, unwanted rules are removed
- # and new ones added.
- config = hookenv.config()
- config.save()
- config.load_previous()
- config['native_transport_port'] = 7777 # 9042 -> 7777
- config['storage_port'] = 7002 # 7000 -> 7002
- config['open_client_ports'] = True
- ufw.reset_mock()
-
- rel_of_type.side_effect = iter([[],
- [{'private-address': '1.1.0.1'},
- {'private-address': '1.1.0.2'}]])
- actions.configure_firewall('')
-
- # Three ports now globally open. Yes, having the globally open
- # native and Thrift ports does make the later more specific
- # rules meaningless, but we add the specific rules anyway.
- ufw.service.assert_has_calls([call('ssh', 'open'),
- call('nrpe', 'open'),
- call(9042, 'close'),
- call(7777, 'open'),
- call(9160, 'open')], any_order=True)
- ufw.revoke_access.assert_has_calls([call('1.1.0.1', 'any', 7000),
- call('1.1.0.2', 'any', 7000)],
- any_order=True)
- ufw.grant_access.assert_has_calls([call('1.1.0.1', 'any', 7001),
- call('1.1.0.1', 'any', 7002),
- call('1.1.0.2', 'any', 7001),
- call('1.1.0.2', 'any', 7002)],
- any_order=True)
-
- @patch('helpers.mountpoint')
- @patch('helpers.get_cassandra_version')
- @patch('charmhelpers.core.host.write_file')
- @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
- @patch('helpers.local_plugins_dir')
- def test_nrpe_external_master_relation(self, local_plugins_dir, nrpe,
- write_file, cassandra_version,
- mountpoint):
- mountpoint.side_effect = os.path.dirname
- cassandra_version.return_value = '2.2'
- # The fake charm_dir() needs populating.
- plugin_src_dir = os.path.join(os.path.dirname(__file__),
- os.pardir, 'files')
- shutil.copytree(plugin_src_dir,
- os.path.join(hookenv.charm_dir(), 'files'))
-
- with tempfile.TemporaryDirectory() as d:
- local_plugins_dir.return_value = d
- actions.nrpe_external_master_relation('')
-
- # The expected file was written to the expected filename
- # with required perms.
- with open(os.path.join(plugin_src_dir, 'check_cassandra_heap.sh'),
- 'rb') as f:
- write_file.assert_called_once_with(
- os.path.join(d, 'check_cassandra_heap.sh'), f.read(),
- perms=0o555)
-
- nrpe().add_check.assert_has_calls([
- call(shortname='cassandra_heap',
- description='Check Cassandra Heap',
- check_cmd='check_cassandra_heap.sh localhost 80 90'),
- call(description=('Check Cassandra Disk '
- '/var/lib/cassandra'),
- shortname='cassandra_disk_var_lib_cassandra',
- check_cmd=('check_disk -u GB -w 50% -c 25% -K 5% '
- '-p /var/lib/cassandra'))],
- any_order=True)
-
- nrpe().write.assert_called_once_with()
-
- @patch('helpers.get_cassandra_version')
- @patch('charmhelpers.core.host.write_file')
- @patch('os.path.exists')
- @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
- def test_nrpe_external_master_relation_no_local(self, nrpe, exists,
- write_file, ver):
- ver.return_value = '2.2'
- # If the local plugins directory doesn't exist, we don't attempt
- # to write files to it. Wait until the subordinate has set it
- # up.
- exists.return_value = False
- actions.nrpe_external_master_relation('')
- self.assertFalse(write_file.called)
-
- @patch('helpers.mountpoint')
- @patch('helpers.get_cassandra_version')
- @patch('os.path.exists')
- @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
- def test_nrpe_external_master_relation_disable_heapchk(self, nrpe, exists,
- ver, mountpoint):
- ver.return_value = '2.2'
- exists.return_value = False
- mountpoint.side_effect = os.path.dirname
-
- # Disable our checks
- config = hookenv.config()
- config['nagios_heapchk_warn_pct'] = 0 # Only one needs to be disabled.
- config['nagios_heapchk_crit_pct'] = 90
-
- actions.nrpe_external_master_relation('')
- exists.assert_called_once_with(helpers.local_plugins_dir())
-
- nrpe().add_check.assert_has_calls([
- call(shortname='cassandra_disk_var_lib_cassandra',
- description=ANY, check_cmd=ANY)], any_order=True)
-
- @patch('helpers.get_cassandra_version')
- @patch('os.path.exists')
- @patch('charmhelpers.contrib.charmsupport.nrpe.NRPE')
- def test_nrpe_external_master_relation_disable_diskchk(self, nrpe,
- exists, ver):
- ver.return_value = '2.2'
- exists.return_value = False
-
- # Disable our checks
- config = hookenv.config()
- config['nagios_disk_warn_pct'] = 0 # Only one needs to be disabled.
- config['magios_disk_crit_pct'] = 50
-
- actions.nrpe_external_master_relation('')
- exists.assert_called_once_with(helpers.local_plugins_dir())
-
- nrpe().add_check.assert_called_once_with(shortname='cassandra_heap',
- description=ANY,
- check_cmd=ANY)
-
- @patch('helpers.get_bootstrapped_ips')
- @patch('helpers.get_seed_ips')
- @patch('charmhelpers.core.hookenv.leader_set')
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_maintain_seeds(self, is_leader, leader_set,
- seed_ips, bootstrapped_ips):
- is_leader.return_value = True
-
- seed_ips.return_value = set(['1.2.3.4'])
- bootstrapped_ips.return_value = set(['2.2.3.4', '3.2.3.4',
- '4.2.3.4', '5.2.3.4'])
-
- actions.maintain_seeds('')
- leader_set.assert_called_once_with(seeds='2.2.3.4,3.2.3.4,4.2.3.4')
-
- @patch('helpers.get_bootstrapped_ips')
- @patch('helpers.get_seed_ips')
- @patch('charmhelpers.core.hookenv.leader_set')
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_maintain_seeds_start(self, is_leader, leader_set,
- seed_ips, bootstrapped_ips):
- seed_ips.return_value = set()
- bootstrapped_ips.return_value = set()
- actions.maintain_seeds('')
- # First seed is the first leader, which lets is get everything
- # started.
- leader_set.assert_called_once_with(seeds=hookenv.unit_private_ip())
-
- @patch('charmhelpers.core.host.pwgen')
- @patch('helpers.query')
- @patch('helpers.set_unit_superusers')
- @patch('helpers.ensure_user')
- @patch('helpers.encrypt_password')
- @patch('helpers.superuser_credentials')
- @patch('helpers.connect')
- @patch('charmhelpers.core.hookenv.is_leader')
- @patch('charmhelpers.core.hookenv.leader_set')
- @patch('charmhelpers.core.hookenv.leader_get')
- def test_reset_default_password(self, leader_get, leader_set, is_leader,
- connect, sup_creds, encrypt_password,
- ensure_user, set_sups, query, pwgen):
- is_leader.return_value = True
- leader_get.return_value = None
- connect().__enter__.return_value = sentinel.session
- connect().__exit__.return_value = False
- connect.reset_mock()
-
- sup_creds.return_value = (sentinel.username, sentinel.password)
- encrypt_password.return_value = sentinel.pwhash
- pwgen.return_value = sentinel.random_password
-
- actions.reset_default_password('')
-
- # First, a superuser account for the unit was created.
- connect.assert_called_once_with('cassandra', 'cassandra',
- timeout=120, auth_timeout=120)
- encrypt_password.assert_called_once_with(sentinel.password)
- ensure_user.assert_called_once_with(sentinel.session,
- sentinel.username,
- sentinel.pwhash,
- superuser=True)
- set_sups.assert_called_once_with([hookenv.local_unit()])
-
- # After that, the default password is reset.
- query.assert_called_once_with(sentinel.session,
- 'ALTER USER cassandra WITH PASSWORD %s',
- cassandra.ConsistencyLevel.ALL,
- (sentinel.random_password,))
-
- # Flag stored to avoid attempting this again.
- leader_set.assert_called_once_with(default_admin_password_changed=True)
-
- @patch('helpers.connect')
- @patch('charmhelpers.core.hookenv.is_leader')
- @patch('charmhelpers.core.hookenv.leader_get')
- def test_reset_default_password_noop(self, leader_get, is_leader, connect):
- leader_get.return_value = True
- is_leader.return_value = True
- actions.reset_default_password('') # noop
- self.assertFalse(connect.called)
-
- @patch('helpers.get_seed_ips')
- @patch('helpers.status_set')
- @patch('charmhelpers.core.hookenv.status_get')
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_set_active(self, is_leader, status_get, status_set, seed_ips):
- is_leader.return_value = False
- status_get.return_value = ('waiting', '')
- seed_ips.return_value = set()
- actions.set_active('')
- status_set.assert_called_once_with('active', 'Live node')
-
- @patch('helpers.get_seed_ips')
- @patch('helpers.status_set')
- @patch('charmhelpers.core.hookenv.status_get')
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_set_active_seed(self, is_leader,
- status_get, status_set, seed_ips):
- is_leader.return_value = False
- status_get.return_value = ('waiting', '')
- seed_ips.return_value = set([hookenv.unit_private_ip()])
- actions.set_active('')
- status_set.assert_called_once_with('active', 'Live seed')
-
- @patch('helpers.num_nodes')
- @patch('helpers.get_seed_ips')
- @patch('helpers.service_status_set')
- @patch('helpers.status_set')
- @patch('charmhelpers.core.hookenv.status_get')
- @patch('charmhelpers.core.hookenv.is_leader')
- def test_set_active_service(self, is_leader,
- status_get, status_set, service_status_set,
- seed_ips, num_nodes):
- status_get.return_value = ('waiting', '')
- is_leader.return_value = True
- seed_ips.return_value = set([hookenv.unit_private_ip()])
- num_nodes.return_value = 1
- actions.set_active('')
- service_status_set.assert_called_once_with('active',
- 'Single node cluster')
-
- service_status_set.reset_mock()
- num_nodes.return_value = 6
- actions.set_active('')
- service_status_set.assert_called_once_with('active',
- '6 node cluster')
-
- @patch('helpers.encrypt_password')
- @patch('helpers.superuser_credentials')
- @patch('helpers.peer_relid')
- def test_request_unit_superuser(self, peer_relid, sup_creds, crypt):
- peer_relid.return_value = sentinel.peer_relid
- sup_creds.return_value = (sentinel.username, sentinel.password)
- crypt.return_value = sentinel.pwhash
- hookenv.relation_get.return_value = dict()
- actions.request_unit_superuser('')
- hookenv.relation_set.assert_called_once_with(
- sentinel.peer_relid,
- username=sentinel.username, pwhash=sentinel.pwhash)
-
- @patch('helpers.update_hosts_file')
- @patch('socket.gethostname')
- def test_update_etc_hosts(self, gethostname, update_hosts_file):
- gethostname.return_value = sentinel.hostname
- actions.update_etc_hosts('')
- update_hosts_file.assert_called_once_with(
- '/etc/hosts', {'10.20.0.1': sentinel.hostname})
-
-
-if __name__ == '__main__':
- unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/test_definitions.py b/charms/trusty/cassandra/tests/test_definitions.py
deleted file mode 100755
index 98103c0..0000000
--- a/charms/trusty/cassandra/tests/test_definitions.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!.venv3/bin/python3
-
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from itertools import chain
-import functools
-import unittest
-from unittest.mock import patch
-
-from charmhelpers.core import hookenv
-from charmhelpers.core.services import ServiceManager
-
-from tests.base import TestCaseBase
-
-import definitions
-
-
-patch = functools.partial(patch, autospec=True)
-
-
-class TestDefinitions(TestCaseBase):
- def test_get_service_definitions(self):
- # We can't really test this in unit tests, but at least we can
- # ensure the basic data structure is returned and accepted.
- defs = definitions.get_service_definitions()
- self.assertIsInstance(defs, list)
- for d in defs:
- with self.subTest(d=d):
- self.assertIsInstance(d, dict)
-
- def test_get_service_definitions_open_ports(self):
- config = hookenv.config()
- defs = definitions.get_service_definitions()
- expected_ports = set([config['rpc_port'],
- config['native_transport_port'],
- config['storage_port'],
- config['ssl_storage_port']])
- opened_ports = set(chain(*(d.get('ports', []) for d in defs)))
- self.assertSetEqual(opened_ports, expected_ports)
-
- def test_get_service_manager(self):
- self.assertIsInstance(definitions.get_service_manager(),
- ServiceManager)
-
- @patch('helpers.get_unit_superusers')
- @patch('helpers.is_decommissioned')
- @patch('helpers.is_cassandra_running')
- def test_requires_live_node(self, is_running, is_decommissioned, get_sup):
- is_decommissioned.return_value = False # Is not decommissioned.
- is_running.return_value = True # Is running.
- get_sup.return_value = set([hookenv.local_unit()]) # Creds exist.
-
- self.assertTrue(bool(definitions.RequiresLiveNode()))
-
- @patch('helpers.get_unit_superusers')
- @patch('helpers.is_decommissioned')
- @patch('helpers.is_cassandra_running')
- def test_requires_live_node_decommissioned(self, is_running,
- is_decommissioned, get_sup):
- is_decommissioned.return_value = True # Is decommissioned.
- is_running.return_value = True # Is running.
- get_sup.return_value = set([hookenv.local_unit()]) # Creds exist.
-
- self.assertFalse(bool(definitions.RequiresLiveNode()))
-
- @patch('helpers.get_unit_superusers')
- @patch('helpers.is_decommissioned')
- @patch('helpers.is_cassandra_running')
- def test_requires_live_node_down(self, is_running,
- is_decommissioned, get_sup):
- is_decommissioned.return_value = False # Is not decommissioned.
- is_running.return_value = False # Is not running.
- get_sup.return_value = set([hookenv.local_unit()]) # Creds exist.
-
- self.assertFalse(bool(definitions.RequiresLiveNode()))
-
- @patch('helpers.get_unit_superusers')
- @patch('helpers.is_decommissioned')
- @patch('helpers.is_cassandra_running')
- def test_requires_live_node_creds(self, is_running,
- is_decommissioned, get_sup):
- is_decommissioned.return_value = False # Is not decommissioned.
- is_running.return_value = True # Is running.
- get_sup.return_value = set() # Creds do not exist.
-
- self.assertFalse(bool(definitions.RequiresLiveNode()))
-
-
-if __name__ == '__main__':
- unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/test_helpers.py b/charms/trusty/cassandra/tests/test_helpers.py
deleted file mode 100755
index 92fa1e8..0000000
--- a/charms/trusty/cassandra/tests/test_helpers.py
+++ /dev/null
@@ -1,1466 +0,0 @@
-#!.venv3/bin/python3
-
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from collections import namedtuple
-import errno
-import functools
-from itertools import repeat
-import os.path
-import subprocess
-import tempfile
-from textwrap import dedent
-import unittest
-from unittest.mock import ANY, call, MagicMock, patch, sentinel
-
-from cassandra import AuthenticationFailed, ConsistencyLevel
-from cassandra.cluster import NoHostAvailable
-import yaml
-
-from charmhelpers import fetch
-from charmhelpers.core import hookenv, host
-
-from tests.base import TestCaseBase
-import helpers
-
-
-patch = functools.partial(patch, autospec=True)
-
-
-class TestHelpers(TestCaseBase):
- @patch('time.sleep')
- def test_backoff(self, sleep):
- i = 0
- for _ in helpers.backoff('foo to bar'):
- i += 1
- if i == 10:
- break
- sleep.assert_has_calls([
- call(2), call(4), call(8), call(16), call(32),
- call(60), call(60), call(60), call(60)])
-
- i = 0
- for _ in helpers.backoff('foo to bar', max_pause=10):
- i += 1
- if i == 10:
- break
- sleep.assert_has_calls([
- call(2), call(4), call(8), call(10), call(10),
- call(10), call(10), call(10), call(10)])
-
- def test_autostart_disabled(self):
- with tempfile.TemporaryDirectory() as tmpdir:
-
- prc = os.path.join(tmpdir, 'policy-rc.d')
- prc_backup = prc + '-orig'
-
- with helpers.autostart_disabled(_policy_rc=prc):
- # No existing policy-rc.d, so no backup made.
- self.assertFalse(os.path.exists(prc_backup))
-
- # A policy-rc.d file has been created that will disable
- # package autostart per spec (ie. returns a 101 exit code).
- self.assertTrue(os.path.exists(prc))
- self.assertEqual(subprocess.call([prc]), 101)
-
- with helpers.autostart_disabled(_policy_rc=prc):
- # A second time, we have a backup made.
- # policy-rc.d still works
- self.assertTrue(os.path.exists(prc_backup))
- self.assertEqual(subprocess.call([prc]), 101)
-
- # Backup removed, and policy-rc.d still works.
- self.assertFalse(os.path.exists(prc_backup))
- self.assertEqual(subprocess.call([prc]), 101)
-
- # Neither backup nor policy-rc.d exist now we are out of the
- # context manager.
- self.assertFalse(os.path.exists(prc_backup))
- self.assertFalse(os.path.exists(prc))
-
- def test_autostart_disabled_partial(self):
- with tempfile.TemporaryDirectory() as tmpdir:
-
- prc = os.path.join(tmpdir, 'policy-rc.d')
- prc_backup = prc + '-orig'
-
- with helpers.autostart_disabled(['foo', 'bar'], _policy_rc=prc):
- # No existing policy-rc.d, so no backup made.
- self.assertFalse(os.path.exists(prc_backup))
-
- # A policy-rc.d file has been created that will disable
- # package autostart per spec (ie. returns a 101 exit code).
- self.assertTrue(os.path.exists(prc))
- self.assertEqual(subprocess.call([prc, 'foo']), 101)
- self.assertEqual(subprocess.call([prc, 'bar']), 101)
- self.assertEqual(subprocess.call([prc, 'baz']), 0)
-
- # Neither backup nor policy-rc.d exist now we are out of the
- # context manager.
- self.assertFalse(os.path.exists(prc_backup))
- self.assertFalse(os.path.exists(prc))
-
- @patch('helpers.autostart_disabled')
- @patch('charmhelpers.fetch.apt_install')
- def test_install_packages(self, apt_install, autostart_disabled):
- packages = ['a_pack', 'b_pack']
- helpers.install_packages(packages)
-
- # All packages got installed, and hook aborted if package
- # installation failed.
- apt_install.assert_called_once_with(['a_pack', 'b_pack'], fatal=True)
-
- # The autostart_disabled context manager was used to stop
- # package installation starting services.
- autostart_disabled().__enter__.assert_called_once_with()
- autostart_disabled().__exit__.assert_called_once_with(None, None, None)
-
- @patch('helpers.autostart_disabled')
- @patch('charmhelpers.fetch.apt_install')
- def test_install_packages_extras(self, apt_install, autostart_disabled):
- packages = ['a_pack', 'b_pack']
- hookenv.config()['extra_packages'] = 'c_pack d_pack'
- helpers.install_packages(packages)
-
- # All packages got installed, and hook aborted if package
- # installation failed.
- apt_install.assert_called_once_with(['a_pack', 'b_pack',
- 'c_pack', 'd_pack'], fatal=True)
-
- # The autostart_disabled context manager was used to stop
- # package installation starting services.
- autostart_disabled().__enter__.assert_called_once_with()
- autostart_disabled().__exit__.assert_called_once_with(None, None, None)
-
- @patch('helpers.autostart_disabled')
- @patch('charmhelpers.fetch.apt_install')
- def test_install_packages_noop(self, apt_install, autostart_disabled):
- # Everything is already installed. Nothing to do.
- fetch.filter_installed_packages.side_effect = lambda pkgs: []
-
- packages = ['a_pack', 'b_pack']
- hookenv.config()['extra_packages'] = 'c_pack d_pack'
- helpers.install_packages(packages)
-
- # All packages got installed, and hook aborted if package
- # installation failed.
- self.assertFalse(apt_install.called)
-
- # Autostart wasn't messed with.
- self.assertFalse(autostart_disabled.called)
-
- @patch('subprocess.Popen')
- def test_ensure_package_status(self, popen):
- for status in ['install', 'hold']:
- with self.subTest(status=status):
- popen.reset_mock()
- hookenv.config()['package_status'] = status
- helpers.ensure_package_status(['a_pack', 'b_pack'])
-
- selections = 'a_pack {}\nb_pack {}\n'.format(
- status, status).encode('US-ASCII')
-
- self.assertEqual(
- [call(['dpkg', '--set-selections'], stdin=subprocess.PIPE),
- call().communicate(input=selections)], popen.mock_calls)
-
- popen.reset_mock()
- hookenv.config()['package_status'] = 'invalid'
- self.assertRaises(RuntimeError,
- helpers.ensure_package_status, ['a_pack', 'b_back'])
- self.assertFalse(popen.called)
-
- @patch('charmhelpers.core.hookenv.leader_get')
- def test_get_seed_ips(self, leader_get):
- leader_get.return_value = '1.2.3.4,5.6.7.8'
- self.assertSetEqual(helpers.get_seed_ips(), set(['1.2.3.4',
- '5.6.7.8']))
-
- @patch('helpers.read_cassandra_yaml')
- def test_actual_seed_ips(self, read_yaml):
- read_yaml.return_value = yaml.load(dedent('''\
- seed_provider:
- - class_name: blah
- parameters:
- - seeds: a,b,c
- '''))
- self.assertSetEqual(helpers.actual_seed_ips(),
- set(['a', 'b', 'c']))
-
- @patch('relations.StorageRelation')
- def test_get_database_directory(self, storage_relation):
- storage_relation().mountpoint = None
-
- # Relative paths are relative to /var/lib/cassandra
- self.assertEqual(helpers.get_database_directory('bar'),
- '/var/lib/cassandra/bar')
-
- # If there is an external mount, relative paths are relative to
- # it. Note the extra 'cassandra' directory - life is easier
- # if we store all our data in a subdirectory on the external
- # mount rather than in its root.
- storage_relation().mountpoint = '/srv/foo'
- self.assertEqual(helpers.get_database_directory('bar'),
- '/srv/foo/cassandra/bar')
-
- # Absolute paths are absolute and passed through unmolested.
- self.assertEqual(helpers.get_database_directory('/bar'), '/bar')
-
- @patch('helpers.get_cassandra_version')
- @patch('relations.StorageRelation')
- def test_get_all_database_directories(self, storage_relation, ver):
- ver.return_value = '2.2'
- storage_relation().mountpoint = '/s'
- self.assertDictEqual(
- helpers.get_all_database_directories(),
- dict(data_file_directories=['/s/cassandra/data'],
- commitlog_directory='/s/cassandra/commitlog',
- saved_caches_directory='/s/cassandra/saved_caches'))
-
- @patch('helpers.get_cassandra_version')
- @patch('relations.StorageRelation')
- def test_get_all_database_directories_30(self, storage_relation, ver):
- ver.return_value = '3.0'
- storage_relation().mountpoint = '/s'
- self.assertDictEqual(
- helpers.get_all_database_directories(),
- dict(data_file_directories=['/s/cassandra/data'],
- commitlog_directory='/s/cassandra/commitlog',
- saved_caches_directory='/s/cassandra/saved_caches',
- hints_directory='/s/cassandra/hints'))
-
- @patch('helpers.recursive_chown')
- @patch('charmhelpers.core.host.mkdir')
- @patch('helpers.get_database_directory')
- @patch('helpers.is_cassandra_running')
- def test_ensure_database_directory(self, is_running, get_db_dir, mkdir,
- recursive_chown):
- absdir = '/an/absolute/dir'
- is_running.return_value = False
- get_db_dir.return_value = absdir
-
- # ensure_database_directory() returns the absolute path.
- self.assertEqual(helpers.ensure_database_directory(absdir), absdir)
-
- # The directory will have been made.
- mkdir.assert_has_calls([
- call('/an'),
- call('/an/absolute'),
- call('/an/absolute/dir',
- owner='cassandra', group='cassandra', perms=0o750)])
-
- # The ownership of the contents has not been reset. Rather than
- # attempting to remount an existing database, which requires
- # resetting permissions, it is better to use sstableloader to
- # import the data into the cluster.
- self.assertFalse(recursive_chown.called)
-
- @patch('charmhelpers.core.host.write_file')
- @patch('os.path.isdir')
- @patch('subprocess.check_output')
- def test_set_io_scheduler(self, check_output, isdir, write_file):
- # Normal operation, the device is detected and the magic
- # file written.
- check_output.return_value = 'foo\n/dev/sdq 1 2 3 1% /foo\n'
- isdir.return_value = True
-
- helpers.set_io_scheduler('fnord', '/foo')
-
- write_file.assert_called_once_with('/sys/block/sdq/queue/scheduler',
- b'fnord', perms=0o644)
-
- # Some OSErrors we log warnings for, and continue.
- for e in (errno.EACCES, errno.ENOENT):
- with self.subTest(errno=e):
- write_file.side_effect = repeat(OSError(e, 'Whoops'))
- hookenv.log.reset_mock()
- helpers.set_io_scheduler('fnord', '/foo')
- hookenv.log.assert_has_calls([call(ANY),
- call(ANY, hookenv.WARNING)])
-
- # Other OSErrors just fail hard.
- write_file.side_effect = iter([OSError(errno.EFAULT, 'Whoops')])
- self.assertRaises(OSError, helpers.set_io_scheduler, 'fnord', '/foo')
-
- # If we are not under lxc, nothing happens at all except a log
- # message.
- helpers.is_lxc.return_value = True
- hookenv.log.reset_mock()
- write_file.reset_mock()
- helpers.set_io_scheduler('fnord', '/foo')
- self.assertFalse(write_file.called)
- hookenv.log.assert_called_once_with(ANY) # A single INFO message.
-
- @patch('shutil.chown')
- def test_recursive_chown(self, chown):
- with tempfile.TemporaryDirectory() as tmpdir:
- os.makedirs(os.path.join(tmpdir, 'a', 'bb', 'ccc'))
- with open(os.path.join(tmpdir, 'top file'), 'w') as f:
- f.write('top file')
- with open(os.path.join(tmpdir, 'a', 'bb', 'midfile'), 'w') as f:
- f.write('midfile')
- helpers.recursive_chown(tmpdir, 'un', 'gn')
- chown.assert_has_calls(
- [call(os.path.join(tmpdir, 'a'), 'un', 'gn'),
- call(os.path.join(tmpdir, 'a', 'bb'), 'un', 'gn'),
- call(os.path.join(tmpdir, 'a', 'bb', 'ccc'), 'un', 'gn'),
- call(os.path.join(tmpdir, 'top file'), 'un', 'gn'),
- call(os.path.join(tmpdir, 'a', 'bb', 'midfile'), 'un', 'gn')],
- any_order=True)
-
- def test_maybe_backup(self):
- with tempfile.TemporaryDirectory() as tmpdir:
- # Our file is backed up to a .orig
- path = os.path.join(tmpdir, 'foo.conf')
- host.write_file(path, b'hello', perms=0o644)
- helpers.maybe_backup(path)
- path_orig = path + '.orig'
- self.assertTrue(os.path.exists(path_orig))
- with open(path_orig, 'rb') as f:
- self.assertEqual(f.read(), b'hello')
- # Safe permissions
- self.assertEqual(os.lstat(path_orig).st_mode & 0o777, 0o600)
-
- # A second call, nothing happens as the .orig is already
- # there.
- host.write_file(path, b'second')
- helpers.maybe_backup(path)
- with open(path_orig, 'rb') as f:
- self.assertEqual(f.read(), b'hello')
-
- @patch('charmhelpers.fetch.apt_cache')
- def test_get_package_version(self, apt_cache):
- version = namedtuple('Version', 'ver_str')('1.0-foo')
- package = namedtuple('Package', 'current_ver')(version)
- apt_cache.return_value = dict(package=package)
- ver = helpers.get_package_version('package')
- self.assertEqual(ver, '1.0-foo')
-
- @patch('charmhelpers.fetch.apt_cache')
- def test_get_package_version_not_found(self, apt_cache):
- version = namedtuple('Version', 'ver_str')('1.0-foo')
- package = namedtuple('Package', 'current_ver')(version)
- apt_cache.return_value = dict(package=package)
- self.assertIsNone(helpers.get_package_version('notfound'))
-
- @patch('charmhelpers.fetch.apt_cache')
- def test_get_package_version_not_installed(self, apt_cache):
- package = namedtuple('Package', 'current_ver')(None)
- apt_cache.return_value = dict(package=package)
- self.assertIsNone(helpers.get_package_version('package'))
-
- def test_get_jre(self):
- hookenv.config()['jre'] = 'opEnjdk' # Case insensitive
- self.assertEqual(helpers.get_jre(), 'openjdk')
-
- hookenv.config()['jre'] = 'oRacle' # Case insensitive
- self.assertEqual(helpers.get_jre(), 'oracle')
-
- def test_get_jre_unknown(self):
- hookenv.config()['jre'] = 'OopsJDK'
- self.assertEqual(helpers.get_jre(), 'openjdk')
- # An error was logged.
- hookenv.log.assert_called_once_with(ANY, hookenv.ERROR)
-
- def test_get_jre_dse_override(self):
- hookenv.config()['edition'] = 'dse'
- self.assertEqual(helpers.get_jre(), 'oracle')
-
- def test_get_cassandra_edition(self):
- hookenv.config()['edition'] = 'community'
- self.assertEqual(helpers.get_cassandra_edition(), 'community')
-
- hookenv.config()['edition'] = 'DSE' # Case insensitive
- self.assertEqual(helpers.get_cassandra_edition(), 'dse')
-
- self.assertFalse(hookenv.log.called)
-
- hookenv.config()['edition'] = 'typo' # Default to community
- self.assertEqual(helpers.get_cassandra_edition(), 'community')
- hookenv.log.assert_any_call(ANY, hookenv.ERROR) # Logs an error.
-
- @patch('helpers.get_cassandra_edition')
- def test_get_cassandra_service(self, get_edition):
- get_edition.return_value = 'whatever'
- self.assertEqual(helpers.get_cassandra_service(), 'cassandra')
- get_edition.return_value = 'dse'
- self.assertEqual(helpers.get_cassandra_service(), 'dse')
-
- def test_get_cassandra_service_dse_override(self):
- hookenv.config()['edition'] = 'dse'
- self.assertEqual(helpers.get_cassandra_service(), 'dse')
-
- @patch('helpers.get_package_version')
- def test_get_cassandra_version(self, get_package_version):
- # Return cassandra package version if it is installed.
- get_package_version.return_value = '1.2.3-2~64'
- self.assertEqual(helpers.get_cassandra_version(), '1.2.3-2~64')
- get_package_version.assert_called_with('cassandra')
-
- @patch('helpers.get_package_version')
- def test_get_cassandra_version_uninstalled(self, get_package_version):
- # Return none if the main cassandra package is not installed
- get_package_version.return_value = None
- self.assertEqual(helpers.get_cassandra_version(), None)
- get_package_version.assert_called_with('cassandra')
-
- @patch('helpers.get_package_version')
- def test_get_cassandra_version_dse(self, get_package_version):
- # Return the cassandra version equivalent if using dse.
- hookenv.config()['edition'] = 'dse'
- get_package_version.return_value = '4.7-beta2~88'
- self.assertEqual(helpers.get_cassandra_version(), '2.1')
- get_package_version.assert_called_with('dse-full')
-
- @patch('helpers.get_package_version')
- def test_get_cassandra_version_dse_uninstalled(self, get_package_version):
- # Return the cassandra version equivalent if using dse.
- hookenv.config()['edition'] = 'dse'
- get_package_version.return_value = None
- self.assertEqual(helpers.get_cassandra_version(), None)
- get_package_version.assert_called_with('dse-full')
-
- def test_get_cassandra_config_dir(self):
- self.assertEqual(helpers.get_cassandra_config_dir(),
- '/etc/cassandra')
- hookenv.config()['edition'] = 'dse'
- self.assertEqual(helpers.get_cassandra_config_dir(),
- '/etc/dse/cassandra')
-
- @patch('helpers.get_cassandra_config_dir')
- def test_get_cassandra_yaml_file(self, get_cassandra_config_dir):
- get_cassandra_config_dir.return_value = '/foo'
- self.assertEqual(helpers.get_cassandra_yaml_file(),
- '/foo/cassandra.yaml')
-
- @patch('helpers.get_cassandra_config_dir')
- def test_get_cassandra_env_file(self, get_cassandra_config_dir):
- get_cassandra_config_dir.return_value = '/foo'
- self.assertEqual(helpers.get_cassandra_env_file(),
- '/foo/cassandra-env.sh')
-
- @patch('helpers.get_cassandra_config_dir')
- def test_get_cassandra_rackdc_file(self, get_cassandra_config_dir):
- get_cassandra_config_dir.return_value = '/foo'
- self.assertEqual(helpers.get_cassandra_rackdc_file(),
- '/foo/cassandra-rackdc.properties')
-
- @patch('helpers.get_cassandra_edition')
- def test_get_cassandra_pid_file(self, get_edition):
- get_edition.return_value = 'whatever'
- self.assertEqual(helpers.get_cassandra_pid_file(),
- '/var/run/cassandra/cassandra.pid')
- get_edition.return_value = 'dse'
- self.assertEqual(helpers.get_cassandra_pid_file(),
- '/var/run/dse/dse.pid')
-
- def test_get_cassandra_packages(self):
- # Default
- self.assertSetEqual(helpers.get_cassandra_packages(),
- set(['cassandra', 'ntp', 'run-one',
- 'netcat', 'openjdk-8-jre-headless']))
-
- def test_get_cassandra_packages_oracle_jre(self):
- # Oracle JRE
- hookenv.config()['jre'] = 'oracle'
- self.assertSetEqual(helpers.get_cassandra_packages(),
- set(['cassandra', 'ntp', 'run-one', 'netcat']))
-
- def test_get_cassandra_packages_dse(self):
- # DataStax Enterprise, and implicit Oracle JRE.
- hookenv.config()['edition'] = 'dsE' # Insensitive.
- self.assertSetEqual(helpers.get_cassandra_packages(),
- set(['dse-full', 'ntp', 'run-one', 'netcat']))
-
- @patch('helpers.get_cassandra_service')
- @patch('charmhelpers.core.host.service_stop')
- @patch('helpers.is_cassandra_running')
- def test_stop_cassandra(self, is_cassandra_running,
- service_stop, get_service):
- get_service.return_value = sentinel.service_name
- is_cassandra_running.side_effect = iter([True, False])
- helpers.stop_cassandra()
- service_stop.assert_called_once_with(sentinel.service_name)
-
- @patch('helpers.get_cassandra_service')
- @patch('charmhelpers.core.host.service_stop')
- @patch('helpers.is_cassandra_running')
- def test_stop_cassandra_noop(self, is_cassandra_running,
- service_stop, get_service):
- get_service.return_value = sentinel.service_name
- is_cassandra_running.return_value = False
- helpers.stop_cassandra()
- self.assertFalse(service_stop.called)
-
- @patch('charmhelpers.core.hookenv.status_set')
- @patch('helpers.get_cassandra_service')
- @patch('charmhelpers.core.host.service_stop')
- @patch('helpers.is_cassandra_running')
- def test_stop_cassandra_failure(self, is_cassandra_running,
- service_stop, get_service, status_set):
- get_service.return_value = sentinel.service_name
- is_cassandra_running.side_effect = iter([True, True])
- self.assertRaises(SystemExit, helpers.stop_cassandra)
- service_stop.assert_called_once_with(sentinel.service_name)
- status_set.assert_called_once_with('blocked',
- 'Cassandra failed to shut down')
-
- @patch('helpers.actual_seed_ips')
- @patch('time.sleep')
- @patch('helpers.get_cassandra_service')
- @patch('charmhelpers.core.host.service_start')
- @patch('helpers.is_cassandra_running')
- def test_start_cassandra(self, is_cassandra_running,
- service_start, get_service, sleep, seed_ips):
- get_service.return_value = sentinel.service_name
- seed_ips.return_value = set(['1.2.3.4'])
- is_cassandra_running.return_value = True
- helpers.start_cassandra()
- self.assertFalse(service_start.called)
-
- is_cassandra_running.side_effect = iter([False, False, False, True])
- helpers.start_cassandra()
- service_start.assert_called_once_with(sentinel.service_name)
-
- # A side effect of starting cassandra is storing the current live
- # seed list, so we can tell when it has changed.
- self.assertEqual(hookenv.config()['configured_seeds'], ['1.2.3.4'])
-
- @patch('os.chmod')
- @patch('helpers.is_cassandra_running')
- @patch('relations.StorageRelation')
- def test_remount_cassandra(self, storage, is_running, chmod):
- config = hookenv.config()
- storage().needs_remount.return_value = True
- storage().mountpoint = '/srv/foo'
- is_running.return_value = False
- config['data_file_directories'] = '/srv/ext/data1 data2'
- config['bootstrapped_into_cluster'] = True
-
- helpers.remount_cassandra()
- storage().migrate.assert_called_once_with('/var/lib/cassandra',
- 'cassandra')
- chmod.assert_called_once_with('/srv/foo/cassandra', 0o750)
- self.assertEqual(config['bootstrapped_into_cluster'], False)
-
- @patch('os.chmod')
- @patch('helpers.is_cassandra_running')
- @patch('relations.StorageRelation')
- def test_remount_cassandra_noop(self, storage, is_running, chmod):
- storage().needs_remount.return_value = False
- storage().mountpoint = None
- is_running.return_value = False
-
- helpers.remount_cassandra()
- self.assertFalse(storage().migrate.called)
- self.assertFalse(chmod.called)
-
- @patch('helpers.is_cassandra_running')
- @patch('relations.StorageRelation')
- def test_remount_cassandra_unmount(self, storage, is_running):
- storage().needs_remount.return_value = True
- storage().mountpoint = None # Reverting to local disk.
- is_running.return_value = False
- hookenv.config()['data_file_directories'] = '/srv/ext/data1 data2'
-
- helpers.remount_cassandra()
-
- # We cannot migrate data back to local disk, as by the time our
- # hooks are called the data is gone.
- self.assertFalse(storage().migrate.called)
-
- # We warn in this case, as reverting to local disk may resurrect
- # old data (if the cluster was ever time while using local
- # disk).
- hookenv.log.assert_any_call(ANY, hookenv.WARNING)
-
- @patch('helpers.ensure_database_directory')
- @patch('helpers.get_all_database_directories')
- def test_ensure_database_directories(self, get_all_dirs, ensure_dir):
- get_all_dirs.return_value = dict(
- data_file_directories=[sentinel.data_file_dir_1,
- sentinel.data_file_dir_2],
- commitlog_directory=sentinel.commitlog_dir,
- saved_caches_directory=sentinel.saved_caches_dir)
- helpers.ensure_database_directories()
- ensure_dir.assert_has_calls([
- call(sentinel.data_file_dir_1),
- call(sentinel.data_file_dir_2),
- call(sentinel.commitlog_dir),
- call(sentinel.saved_caches_dir)], any_order=True)
-
- @patch('cassandra.cluster.Cluster')
- @patch('cassandra.auth.PlainTextAuthProvider')
- @patch('helpers.superuser_credentials')
- @patch('helpers.read_cassandra_yaml')
- def test_connect(self, yaml, creds, auth_provider, cluster):
- # host and port are pulled from the current active
- # cassandra.yaml file, rather than configuration, as
- # configuration may not match reality (if for no other reason
- # that we are running this code in order to make reality match
- # the desired configuration).
- yaml.return_value = dict(rpc_address='1.2.3.4',
- native_transport_port=666)
-
- creds.return_value = ('un', 'pw')
- auth_provider.return_value = sentinel.ap
-
- cluster().connect.return_value = sentinel.session
- cluster.reset_mock()
-
- with helpers.connect() as session:
- auth_provider.assert_called_once_with(username='un',
- password='pw')
- cluster.assert_called_once_with(['1.2.3.4'], port=666,
- auth_provider=sentinel.ap)
- self.assertIs(session, sentinel.session)
- self.assertFalse(cluster().shutdown.called)
-
- cluster().shutdown.assert_called_once_with()
-
- @patch('cassandra.cluster.Cluster')
- @patch('cassandra.auth.PlainTextAuthProvider')
- @patch('helpers.superuser_credentials')
- @patch('helpers.read_cassandra_yaml')
- def test_connect_with_creds(self, yaml, creds, auth_provider, cluster):
- # host and port are pulled from the current active
- # cassandra.yaml file, rather than configuration, as
- # configuration may not match reality (if for no other reason
- # that we are running this code in order to make reality match
- # the desired configuration).
- yaml.return_value = dict(rpc_address='1.2.3.4',
- native_transport_port=666)
-
- auth_provider.return_value = sentinel.ap
-
- with helpers.connect(username='explicit', password='boo'):
- auth_provider.assert_called_once_with(username='explicit',
- password='boo')
-
- @patch('time.sleep')
- @patch('time.time')
- @patch('cassandra.cluster.Cluster')
- @patch('helpers.superuser_credentials')
- @patch('helpers.read_cassandra_yaml')
- def test_connect_badauth(self, yaml, creds, cluster, time, sleep):
- # host and port are pulled from the current active
- # cassandra.yaml file, rather than configuration, as
- # configuration may not match reality (if for no other reason
- # that we are running this code in order to make reality match
- # the desired configuration).
- yaml.return_value = dict(rpc_address='1.2.3.4',
- native_transport_port=666)
- time.side_effect = [0, 7, 99999]
-
- creds.return_value = ('un', 'pw')
-
- x = NoHostAvailable('whoops', {'1.2.3.4': AuthenticationFailed()})
- cluster().connect.side_effect = x
-
- self.assertRaises(AuthenticationFailed, helpers.connect().__enter__)
-
- # Authentication failures are retried, but for a shorter time
- # than other connection errors which are retried for a few
- # minutes.
- self.assertEqual(cluster().connect.call_count, 2)
- self.assertEqual(cluster().shutdown.call_count, 2)
-
- @patch('time.sleep')
- @patch('time.time')
- @patch('cassandra.cluster.Cluster')
- @patch('helpers.superuser_credentials')
- @patch('helpers.read_cassandra_yaml')
- def test_connect_timeout(self, yaml, creds, cluster, time, sleep):
- yaml.return_value = dict(rpc_address='1.2.3.4',
- native_transport_port=666)
- time.side_effect = [0, 1, 2, 3, 10, 20, 30, 40, 99999]
-
- creds.return_value = ('un', 'pw')
-
- x = NoHostAvailable('whoops', {'1.2.3.4': sentinel.exception})
- cluster().connect.side_effect = x
-
- self.assertRaises(NoHostAvailable, helpers.connect().__enter__)
-
- # Authentication failures fail immediately, unlike other
- # connection errors which are retried.
- self.assertEqual(cluster().connect.call_count, 5)
- self.assertEqual(cluster().shutdown.call_count, 5)
- self.assertEqual(sleep.call_count, 4)
-
- @patch('cassandra.query.SimpleStatement')
- def test_query(self, simple_statement):
- simple_statement.return_value = sentinel.s_statement
- session = MagicMock()
- session.execute.return_value = sentinel.results
- self.assertEqual(helpers.query(session, sentinel.statement,
- sentinel.consistency, sentinel.args),
- sentinel.results)
- simple_statement.assert_called_once_with(
- sentinel.statement, consistency_level=sentinel.consistency)
- session.execute.assert_called_once_with(simple_statement(''),
- sentinel.args)
-
- @patch('cassandra.query.SimpleStatement')
- @patch('helpers.backoff')
- def test_query_retry(self, backoff, simple_statement):
- backoff.return_value = repeat(True)
- simple_statement.return_value = sentinel.s_statement
- session = MagicMock()
- session.execute.side_effect = iter([RuntimeError(), sentinel.results])
- self.assertEqual(helpers.query(session, sentinel.statement,
- sentinel.consistency, sentinel.args),
- sentinel.results)
- self.assertEqual(session.execute.call_count, 2)
-
- @patch('time.time')
- @patch('cassandra.query.SimpleStatement')
- @patch('helpers.backoff')
- def test_query_timeout(self, backoff, simple_statement, time):
- backoff.return_value = repeat(True)
- # Timeout is 600
- time.side_effect = iter([0, 1, 2, 3, 500, 700, RuntimeError()])
- simple_statement.return_value = sentinel.s_statement
- session = MagicMock()
-
- class Whoops(Exception):
- pass
-
- session.execute.side_effect = repeat(Whoops('Fail'))
- self.assertRaises(Whoops, helpers.query, session, sentinel.statement,
- sentinel.consistency, sentinel.args)
- self.assertEqual(session.execute.call_count, 4)
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.query')
- def test_ensure_user(self, query, ver):
- ver.return_value = '2.1'
- helpers.ensure_user(sentinel.session,
- sentinel.username, sentinel.pwhash,
- superuser=sentinel.supflag)
- query.assert_has_calls([
- call(sentinel.session,
- 'INSERT INTO system_auth.users (name, super) VALUES (%s, %s)',
- ConsistencyLevel.ALL, (sentinel.username, sentinel.supflag)),
- call(sentinel.session,
- 'INSERT INTO system_auth.credentials (username, salted_hash) '
- 'VALUES (%s, %s)',
- ConsistencyLevel.ALL,
- (sentinel.username, sentinel.pwhash))])
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.query')
- def test_ensure_user_22(self, query, ver):
- ver.return_value = '2.2'
- helpers.ensure_user(sentinel.session,
- sentinel.username, sentinel.pwhash,
- superuser=sentinel.supflag)
- query.assert_called_once_with(sentinel.session,
- 'INSERT INTO system_auth.roles (role, '
- 'can_login, is_superuser, salted_hash) '
- 'VALUES (%s, TRUE, %s, %s)',
- ConsistencyLevel.ALL,
- (sentinel.username, sentinel.supflag,
- sentinel.pwhash))
-
- @patch('helpers.ensure_user')
- @patch('helpers.encrypt_password')
- @patch('helpers.nodetool')
- @patch('helpers.reconfigure_and_restart_cassandra')
- @patch('helpers.connect')
- @patch('helpers.superuser_credentials')
- def test_create_unit_superuser_hard(self, creds, connect, restart,
- nodetool, encrypt_password,
- ensure_user):
- creds.return_value = (sentinel.username, sentinel.password)
- connect().__enter__.return_value = sentinel.session
- connect().__exit__.return_value = False
- connect.reset_mock()
-
- encrypt_password.return_value = sentinel.pwhash
-
- helpers.create_unit_superuser_hard()
-
- # Cassandra was restarted twice, first with authentication
- # disabled and again with the normal configuration.
- restart.assert_has_calls([
- call(dict(authenticator='AllowAllAuthenticator',
- rpc_address='localhost')),
- call()])
-
- # A connection was made as the superuser, which words because
- # authentication has been disabled on this node.
- connect.assert_called_once_with()
-
- # The user was created.
- encrypt_password.assert_called_once_with(sentinel.password)
- ensure_user.assert_called_once_with(sentinel.session,
- sentinel.username,
- sentinel.pwhash,
- superuser=True)
-
- # Local Cassandra was flushed. This is probably unnecessary.
- nodetool.assert_called_once_with('flush')
-
- def test_cqlshrc_path(self):
- self.assertEqual(helpers.get_cqlshrc_path(),
- '/root/.cassandra/cqlshrc')
-
- def test_superuser_username(self):
- self.assertEqual(hookenv.local_unit(), 'service/1')
- self.assertEqual(helpers.superuser_username(), 'juju_service_1')
-
- @patch('helpers.superuser_username')
- @patch('helpers.get_cqlshrc_path')
- @patch('helpers.get_cassandra_version')
- @patch('charmhelpers.core.host.pwgen')
- def test_superuser_credentials_20(self, pwgen, get_cassandra_version,
- get_cqlshrc_path, get_username):
- get_cassandra_version.return_value = '2.0'
- with tempfile.TemporaryDirectory() as dotcassandra_dir:
- cqlshrc_path = os.path.join(dotcassandra_dir, 'cqlshrc')
- get_cqlshrc_path.return_value = cqlshrc_path
- get_username.return_value = 'foo'
- pwgen.return_value = 'secret'
- hookenv.config()['rpc_port'] = 666
- hookenv.config()['native_transport_port'] = 777
-
- # First time generates username & password.
- username, password = helpers.superuser_credentials()
- self.assertEqual(username, 'foo')
- self.assertEqual(password, 'secret')
-
- # Credentials are stored in the cqlshrc file.
- expected_cqlshrc = dedent('''\
- [authentication]
- username = foo
- password = secret
-
- [connection]
- hostname = 10.30.0.1
- port = 666
- ''').strip()
- with open(cqlshrc_path, 'r') as f:
- self.assertEqual(f.read().strip(), expected_cqlshrc)
-
- # If the credentials have been stored, they are not
- # regenerated.
- pwgen.return_value = 'secret2'
- username, password = helpers.superuser_credentials()
- self.assertEqual(username, 'foo')
- self.assertEqual(password, 'secret')
- with open(cqlshrc_path, 'r') as f:
- self.assertEqual(f.read().strip(), expected_cqlshrc)
-
- @patch('helpers.superuser_username')
- @patch('helpers.get_cqlshrc_path')
- @patch('helpers.get_cassandra_version')
- @patch('charmhelpers.core.host.pwgen')
- def test_superuser_credentials(self, pwgen, get_cassandra_version,
- get_cqlshrc_path, get_username):
- # Cassandra 2.1 or higher uses native protocol in its cqlshrc
- get_cassandra_version.return_value = '2.1'
- with tempfile.TemporaryDirectory() as dotcassandra_dir:
- cqlshrc_path = os.path.join(dotcassandra_dir, 'cqlshrc')
- get_cqlshrc_path.return_value = cqlshrc_path
- get_username.return_value = 'foo'
- pwgen.return_value = 'secret'
- hookenv.config()['rpc_port'] = 666
- hookenv.config()['native_transport_port'] = 777
-
- # First time generates username & password.
- username, password = helpers.superuser_credentials()
- self.assertEqual(username, 'foo')
- self.assertEqual(password, 'secret')
-
- # Credentials are stored in the cqlshrc file.
- expected_cqlshrc = dedent('''\
- [authentication]
- username = foo
- password = secret
-
- [connection]
- hostname = 10.30.0.1
- port = 777
- ''').strip()
- with open(cqlshrc_path, 'r') as f:
- self.assertEqual(f.read().strip(), expected_cqlshrc)
-
- @patch('subprocess.check_output')
- def test_nodetool(self, check_output):
- check_output.return_value = 'OK'
- self.assertEqual(helpers.nodetool('status', 'system_auth'), 'OK')
-
- # The expected command was run against the local node.
- check_output.assert_called_once_with(
- ['nodetool', 'status', 'system_auth'],
- universal_newlines=True, stderr=subprocess.STDOUT, timeout=119)
-
- # The output was emitted.
- helpers.emit.assert_called_once_with('OK')
-
- @patch('helpers.is_cassandra_running')
- @patch('helpers.backoff')
- @patch('subprocess.check_output')
- def test_nodetool_CASSANDRA_8776(self, check_output, backoff, is_running):
- is_running.return_value = True
- backoff.return_value = repeat(True)
- check_output.side_effect = iter(['ONE Error: stuff', 'TWO OK'])
- self.assertEqual(helpers.nodetool('status'), 'TWO OK')
-
- # The output was emitted.
- helpers.emit.assert_called_once_with('TWO OK')
-
- @patch('helpers.is_cassandra_running')
- @patch('helpers.backoff')
- @patch('subprocess.check_output')
- def test_nodetool_retry(self, check_output, backoff, is_running):
- backoff.return_value = repeat(True)
- is_running.return_value = True
- check_output.side_effect = iter([
- subprocess.CalledProcessError([], 1, 'fail 1'),
- subprocess.CalledProcessError([], 1, 'fail 2'),
- subprocess.CalledProcessError([], 1, 'fail 3'),
- subprocess.CalledProcessError([], 1, 'fail 4'),
- subprocess.CalledProcessError([], 1, 'fail 5'),
- 'OK'])
- self.assertEqual(helpers.nodetool('status'), 'OK')
-
- # Later fails and final output was emitted.
- helpers.emit.assert_has_calls([call('fail 5'), call('OK')])
-
- @patch('helpers.get_bootstrapped_ips')
- def test_num_nodes(self, bootstrapped_ips):
- bootstrapped_ips.return_value = ['10.0.0.1', '10.0.0.2']
- self.assertEqual(helpers.num_nodes(), 2)
-
- @patch('helpers.get_cassandra_yaml_file')
- def test_read_cassandra_yaml(self, get_cassandra_yaml_file):
- with tempfile.NamedTemporaryFile('w') as f:
- f.write('a: one')
- f.flush()
- get_cassandra_yaml_file.return_value = f.name
- self.assertDictEqual(helpers.read_cassandra_yaml(),
- dict(a='one'))
-
- @patch('helpers.get_cassandra_yaml_file')
- def test_write_cassandra_yaml(self, get_cassandra_yaml_file):
- with tempfile.NamedTemporaryFile() as f:
- get_cassandra_yaml_file.return_value = f.name
- helpers.write_cassandra_yaml([1, 2, 3])
- with open(f.name, 'r') as f2:
- self.assertEqual(f2.read(), '[1, 2, 3]\n')
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.get_cassandra_yaml_file')
- @patch('helpers.get_seed_ips')
- @patch('charmhelpers.core.host.write_file')
- def test_configure_cassandra_yaml_20(self, write_file, seed_ips, yaml_file,
- get_cassandra_version):
- get_cassandra_version.return_value = '2.0'
- hookenv.config().update(dict(num_tokens=128,
- cluster_name='test_cluster_name',
- partitioner='test_partitioner'))
-
- seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']
-
- existing_config = '''
- seed_provider:
- - class_name: blah.SimpleSeedProvider
- parameters:
- - seeds: 127.0.0.1 # Comma separated list.
- '''
-
- with tempfile.TemporaryDirectory() as tmpdir:
- yaml_config = os.path.join(tmpdir, 'c.yaml')
- yaml_file.return_value = yaml_config
- with open(yaml_config, 'w', encoding='UTF-8') as f:
- f.write(existing_config)
-
- helpers.configure_cassandra_yaml()
-
- self.assertEqual(write_file.call_count, 2)
- new_config = write_file.call_args[0][1]
-
- expected_config = dedent('''\
- cluster_name: test_cluster_name
- authenticator: PasswordAuthenticator
- num_tokens: 128
- partitioner: test_partitioner
- listen_address: 10.20.0.1
- rpc_address: 0.0.0.0
- rpc_port: 9160
- native_transport_port: 9042
- storage_port: 7000
- ssl_storage_port: 7001
- authorizer: AllowAllAuthorizer
- seed_provider:
- - class_name: blah.SimpleSeedProvider
- parameters:
- # No whitespace in seeds is important.
- - seeds: '10.20.0.1,10.20.0.2,10.20.0.3'
- endpoint_snitch: GossipingPropertyFileSnitch
- data_file_directories:
- - /var/lib/cassandra/data
- commitlog_directory: /var/lib/cassandra/commitlog
- saved_caches_directory: /var/lib/cassandra/saved_caches
- compaction_throughput_mb_per_sec: 16
- stream_throughput_outbound_megabits_per_sec: 200
- tombstone_warn_threshold: 1000
- tombstone_failure_threshold: 100000
- start_rpc: true
- ''')
- self.maxDiff = None
- self.assertEqual(yaml.safe_load(new_config),
- yaml.safe_load(expected_config))
-
- # Confirm we can use an explicit cluster_name too.
- write_file.reset_mock()
- hookenv.config()['cluster_name'] = 'fubar'
- helpers.configure_cassandra_yaml()
- new_config = write_file.call_args[0][1]
- self.assertEqual(yaml.safe_load(new_config)['cluster_name'],
- 'fubar')
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.get_cassandra_yaml_file')
- @patch('helpers.get_seed_ips')
- @patch('charmhelpers.core.host.write_file')
- def test_configure_cassandra_yaml_22(self, write_file, seed_ips, yaml_file,
- get_cassandra_version):
- get_cassandra_version.return_value = '2.0'
- hookenv.config().update(dict(num_tokens=128,
- cluster_name='test_cluster_name',
- partitioner='test_partitioner'))
-
- seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']
-
- existing_config = '''
- seed_provider:
- - class_name: blah.SimpleSeedProvider
- parameters:
- - seeds: 127.0.0.1 # Comma separated list.
- start_rpc: false # Defaults to False starting 2.2
- '''
-
- with tempfile.TemporaryDirectory() as tmpdir:
- yaml_config = os.path.join(tmpdir, 'c.yaml')
- yaml_file.return_value = yaml_config
- with open(yaml_config, 'w', encoding='UTF-8') as f:
- f.write(existing_config)
-
- helpers.configure_cassandra_yaml()
-
- self.assertEqual(write_file.call_count, 2)
- new_config = write_file.call_args[0][1]
-
- expected_config = dedent('''\
- start_rpc: true
- cluster_name: test_cluster_name
- authenticator: PasswordAuthenticator
- num_tokens: 128
- partitioner: test_partitioner
- listen_address: 10.20.0.1
- rpc_address: 0.0.0.0
- rpc_port: 9160
- native_transport_port: 9042
- storage_port: 7000
- ssl_storage_port: 7001
- authorizer: AllowAllAuthorizer
- seed_provider:
- - class_name: blah.SimpleSeedProvider
- parameters:
- # No whitespace in seeds is important.
- - seeds: '10.20.0.1,10.20.0.2,10.20.0.3'
- endpoint_snitch: GossipingPropertyFileSnitch
- data_file_directories:
- - /var/lib/cassandra/data
- commitlog_directory: /var/lib/cassandra/commitlog
- saved_caches_directory: /var/lib/cassandra/saved_caches
- compaction_throughput_mb_per_sec: 16
- stream_throughput_outbound_megabits_per_sec: 200
- tombstone_warn_threshold: 1000
- tombstone_failure_threshold: 100000
- ''')
- self.maxDiff = None
- self.assertEqual(yaml.safe_load(new_config),
- yaml.safe_load(expected_config))
-
- # Confirm we can use an explicit cluster_name too.
- write_file.reset_mock()
- hookenv.config()['cluster_name'] = 'fubar'
- helpers.configure_cassandra_yaml()
- new_config = write_file.call_args[0][1]
- self.assertEqual(yaml.safe_load(new_config)['cluster_name'],
- 'fubar')
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.get_cassandra_yaml_file')
- @patch('helpers.get_seed_ips')
- @patch('charmhelpers.core.host.write_file')
- def test_configure_cassandra_yaml(self, write_file, seed_ips,
- yaml_file, get_cassandra_version):
- get_cassandra_version.return_value = '2.1'
- hookenv.config().update(dict(num_tokens=128,
- cluster_name='test_cluster_name',
- partitioner='test_partitioner'))
-
- seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']
-
- existing_config = '''
- seed_provider:
- - class_name: blah.SimpleSeedProvider
- parameters:
- - seeds: 127.0.0.1 # Comma separated list.
- '''
-
- with tempfile.TemporaryDirectory() as tmpdir:
- yaml_config = os.path.join(tmpdir, 'c.yaml')
- yaml_file.return_value = yaml_config
- with open(yaml_config, 'w', encoding='UTF-8') as f:
- f.write(existing_config)
-
- helpers.configure_cassandra_yaml()
-
- self.assertEqual(write_file.call_count, 2)
- new_config = write_file.call_args[0][1]
-
- expected_config = dedent('''\
- cluster_name: test_cluster_name
- authenticator: PasswordAuthenticator
- num_tokens: 128
- partitioner: test_partitioner
- listen_address: 10.20.0.1
- rpc_address: 0.0.0.0
- broadcast_rpc_address: 10.30.0.1
- start_rpc: true
- rpc_port: 9160
- native_transport_port: 9042
- storage_port: 7000
- ssl_storage_port: 7001
- authorizer: AllowAllAuthorizer
- seed_provider:
- - class_name: blah.SimpleSeedProvider
- parameters:
- # No whitespace in seeds is important.
- - seeds: '10.20.0.1,10.20.0.2,10.20.0.3'
- endpoint_snitch: GossipingPropertyFileSnitch
- data_file_directories:
- - /var/lib/cassandra/data
- commitlog_directory: /var/lib/cassandra/commitlog
- saved_caches_directory: /var/lib/cassandra/saved_caches
- compaction_throughput_mb_per_sec: 16
- stream_throughput_outbound_megabits_per_sec: 200
- tombstone_warn_threshold: 1000
- tombstone_failure_threshold: 100000
- ''')
- self.maxDiff = None
- self.assertEqual(yaml.safe_load(new_config),
- yaml.safe_load(expected_config))
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.get_cassandra_yaml_file')
- @patch('helpers.get_seed_ips')
- @patch('charmhelpers.core.host.write_file')
- def test_configure_cassandra_yaml_overrides(self, write_file, seed_ips,
- yaml_file, version):
- version.return_value = '2.1'
- hookenv.config().update(dict(num_tokens=128,
- cluster_name=None,
- partitioner='my_partitioner'))
-
- seed_ips.return_value = ['10.20.0.1', '10.20.0.2', '10.20.0.3']
-
- existing_config = dedent('''\
- seed_provider:
- - class_name: blah.blah.SimpleSeedProvider
- parameters:
- - seeds: 127.0.0.1 # Comma separated list.
- ''')
- overrides = dict(partitioner='overridden_partitioner')
-
- with tempfile.TemporaryDirectory() as tmpdir:
- yaml_config = os.path.join(tmpdir, 'c.yaml')
- yaml_file.return_value = yaml_config
- with open(yaml_config, 'w', encoding='UTF-8') as f:
- f.write(existing_config)
-
- helpers.configure_cassandra_yaml(overrides=overrides)
-
- self.assertEqual(write_file.call_count, 2)
- new_config = write_file.call_args[0][1]
-
- self.assertEqual(yaml.safe_load(new_config)['partitioner'],
- 'overridden_partitioner')
-
- def test_get_pid_from_file(self):
- with tempfile.NamedTemporaryFile('w') as pid_file:
- pid_file.write(' 42\t')
- pid_file.flush()
- self.assertEqual(helpers.get_pid_from_file(pid_file.name), 42)
- pid_file.write('\nSome Noise')
- pid_file.flush()
- self.assertEqual(helpers.get_pid_from_file(pid_file.name), 42)
-
- for invalid_pid in ['-1', '0', 'fred']:
- with self.subTest(invalid_pid=invalid_pid):
- with tempfile.NamedTemporaryFile('w') as pid_file:
- pid_file.write(invalid_pid)
- pid_file.flush()
- self.assertRaises(ValueError,
- helpers.get_pid_from_file, pid_file.name)
-
- with tempfile.TemporaryDirectory() as tmpdir:
- self.assertRaises(OSError, helpers.get_pid_from_file,
- os.path.join(tmpdir, 'invalid.pid'))
-
- @patch('helpers.get_cassandra_pid_file')
- def test_is_cassandra_running_not_running(self, get_pid_file):
- # When Cassandra is not running, the pidfile does not exist.
- get_pid_file.return_value = 'does not exist'
- self.assertFalse(helpers.is_cassandra_running())
-
- @patch('os.path.exists')
- @patch('helpers.get_pid_from_file')
- def test_is_cassandra_running_invalid_pid(self, get_pid_from_file, exists):
- # get_pid_from_file raises a ValueError if the pid is illegal.
- get_pid_from_file.side_effect = repeat(ValueError('Whoops'))
- exists.return_value = True # The pid file is there, just insane.
-
- # is_cassandra_running() fails hard in this case, since we
- # cannot safely continue when the system is insane.
- self.assertRaises(ValueError, helpers.is_cassandra_running)
-
- @patch('os.kill')
- @patch('os.path.exists')
- @patch('helpers.get_pid_from_file')
- def test_is_cassandra_running_missing_process(self, get_pid_from_file,
- exists, kill):
- # get_pid_from_file raises a ValueError if the pid is illegal.
- get_pid_from_file.return_value = sentinel.pid_file
- exists.return_value = True # The pid file is there
- kill.side_effect = repeat(ProcessLookupError()) # The process isn't
- self.assertFalse(helpers.is_cassandra_running())
-
- @patch('os.kill')
- @patch('os.path.exists')
- @patch('helpers.get_pid_from_file')
- def test_is_cassandra_running_wrong_user(self, get_pid_from_file,
- exists, kill):
- # get_pid_from_file raises a ValueError if the pid is illegal.
- get_pid_from_file.return_value = sentinel.pid_file
- exists.return_value = True # The pid file is there
- kill.side_effect = repeat(PermissionError()) # But the process isn't
- self.assertRaises(PermissionError, helpers.is_cassandra_running)
-
- @patch('time.sleep')
- @patch('os.kill')
- @patch('helpers.get_pid_from_file')
- @patch('subprocess.call')
- def test_is_cassandra_running_starting_up(self, call, get_pid_from_file,
- kill, sleep):
- sleep.return_value = None # Don't actually sleep in unittests.
- os.kill.return_value = True # There is a running pid.
- get_pid_from_file.return_value = 42
- subprocess.call.side_effect = iter([3, 2, 1, 0]) # 4th time the charm
- self.assertTrue(helpers.is_cassandra_running())
-
- @patch('helpers.backoff')
- @patch('os.kill')
- @patch('subprocess.call')
- @patch('helpers.get_pid_from_file')
- def test_is_cassandra_running_shutting_down(self, get_pid_from_file,
- call, kill, backoff):
- # If Cassandra is in the process of shutting down, it might take
- # several failed checks before the pid file disappears.
- backoff.return_value = repeat(True)
- os.kill.return_value = None # The process is running
- call.return_value = 1 # But nodetool is not succeeding.
-
- # Fourth time, the pid file is gone.
- get_pid_from_file.side_effect = iter([42, 42, 42,
- FileNotFoundError('Whoops')])
- self.assertFalse(helpers.is_cassandra_running())
-
- @patch('os.kill')
- @patch('subprocess.call')
- @patch('os.path.exists')
- @patch('helpers.get_pid_from_file')
- def test_is_cassandra_running_failsafe(self, get_pid_from_file,
- exists, subprocess_call, kill):
- get_pid_from_file.return_value = sentinel.pid_file
- exists.return_value = True # The pid file is there
- subprocess_call.side_effect = repeat(RuntimeError('whoops'))
- # Weird errors are reraised.
- self.assertRaises(RuntimeError, helpers.is_cassandra_running)
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.query')
- def test_get_auth_keyspace_replication(self, query, ver):
- ver.return_value = '2.2'
- query.return_value = [('{"json": true}',)]
- settings = helpers.get_auth_keyspace_replication(sentinel.session)
- self.assertDictEqual(settings, dict(json=True))
- query.assert_called_once_with(
- sentinel.session, dedent('''\
- SELECT strategy_options FROM system.schema_keyspaces
- WHERE keyspace_name='system_auth'
- '''), ConsistencyLevel.QUORUM)
-
- @patch('helpers.get_cassandra_version')
- @patch('helpers.query')
- def test_get_auth_keyspace_replication_30(self, query, ver):
- ver.return_value = '3.0'
- query.return_value = [({"json": True},)] # Decoded under 3.0
- settings = helpers.get_auth_keyspace_replication(sentinel.session)
- self.assertDictEqual(settings, dict(json=True))
- query.assert_called_once_with(
- sentinel.session, dedent('''\
- SELECT replication FROM system_schema.keyspaces
- WHERE keyspace_name='system_auth'
- '''), ConsistencyLevel.QUORUM)
-
- @patch('helpers.status_set')
- @patch('charmhelpers.core.hookenv.status_get')
- @patch('helpers.query')
- def test_set_auth_keyspace_replication(self, query,
- status_get, status_set):
- status_get.return_value = ('active', '')
- settings = dict(json=True)
- helpers.set_auth_keyspace_replication(sentinel.session, settings)
- query.assert_called_once_with(sentinel.session,
- 'ALTER KEYSPACE system_auth '
- 'WITH REPLICATION = %s',
- ConsistencyLevel.ALL, (settings,))
-
- @patch('helpers.status_set')
- @patch('charmhelpers.core.hookenv.status_get')
- @patch('helpers.nodetool')
- def test_repair_auth_keyspace(self, nodetool, status_get, status_set):
- status_get.return_value = (sentinel.status, '')
- helpers.repair_auth_keyspace()
- status_set.assert_called_once_with(sentinel.status,
- 'Repairing system_auth keyspace')
- # The repair operation may still fail, and I am currently regularly
- # seeing 'snapshot creation' errors. Repair also takes ages with
- # Cassandra 2.0. So retry until success, up to 1 hour.
- nodetool.assert_called_once_with('repair', 'system_auth', timeout=3600)
-
- def test_is_bootstrapped(self):
- self.assertFalse(helpers.is_bootstrapped())
- helpers.set_bootstrapped()
- self.assertTrue(helpers.is_bootstrapped())
-
- @patch('helpers.get_node_status')
- def test_is_decommissioned(self, get_node_status):
- get_node_status.return_value = 'DECOMMISSIONED'
- self.assertTrue(helpers.is_decommissioned())
- get_node_status.return_value = 'LEAVING'
- self.assertTrue(helpers.is_decommissioned())
- get_node_status.return_value = 'NORMAL'
- self.assertFalse(helpers.is_decommissioned())
-
- @patch('helpers.nodetool')
- def test_emit_describe_cluster(self, nodetool):
- helpers.emit_describe_cluster()
- nodetool.assert_called_once_with('describecluster')
-
- @patch('helpers.nodetool')
- def test_emit_status(self, nodetool):
- helpers.emit_status()
- nodetool.assert_called_once_with('status')
-
- @patch('helpers.nodetool')
- def test_emit_netstats(self, nodetool):
- helpers.emit_netstats()
- nodetool.assert_called_once_with('netstats')
-
- def test_week_spread(self):
- # The first seven units run midnight on different days.
- for i in range(0, 7): # There is no unit 0
- with self.subTest(unit=i):
- self.assertTupleEqual(helpers.week_spread(i), (i, 0, 0))
-
- # The next seven units run midday on different days.
- for i in range(7, 14):
- with self.subTest(unit=i):
- self.assertTupleEqual(helpers.week_spread(i), (i - 7, 12, 0))
-
- # And the next seven units at 6 am on different days.
- for i in range(14, 21):
- with self.subTest(unit=i):
- self.assertTupleEqual(helpers.week_spread(i), (i - 14, 6, 0))
-
- # This keeps going as best we can, subdividing the hours.
- self.assertTupleEqual(helpers.week_spread(811), (6, 19, 18))
-
- # The granularity is 1 minute, so eventually we wrap after about
- # 7000 units.
- self.assertTupleEqual(helpers.week_spread(0), (0, 0, 0))
- for i in range(1, 7168):
- with self.subTest(unit=i):
- self.assertNotEqual(helpers.week_spread(i), (0, 0, 0))
- self.assertTupleEqual(helpers.week_spread(7168), (0, 0, 0))
-
- def test_local_plugins_dir(self):
- self.assertEqual(helpers.local_plugins_dir(),
- '/usr/local/lib/nagios/plugins')
-
- def test_update_hosts_file_new_entry(self):
- org = dedent("""\
- 127.0.0.1 localhost
- 10.0.1.2 existing
- """)
- new = dedent("""\
- 127.0.0.1 localhost
- 10.0.1.2 existing
- 10.0.1.3 newname
- """)
- with tempfile.NamedTemporaryFile(mode='w') as f:
- f.write(org)
- f.flush()
- m = {'10.0.1.3': 'newname'}
- helpers.update_hosts_file(f.name, m)
- self.assertEqual(new.strip(), open(f.name, 'r').read().strip())
-
- def test_update_hosts_file_changed_entry(self):
- org = dedent("""\
- 127.0.0.1 localhost
- 10.0.1.2 existing
- """)
- new = dedent("""\
- 127.0.0.1 localhost
- 10.0.1.3 existing
- """)
- with tempfile.NamedTemporaryFile(mode='w') as f:
- f.write(org)
- f.flush()
- m = {'10.0.1.3': 'existing'}
- helpers.update_hosts_file(f.name, m)
- self.assertEqual(new.strip(), open(f.name, 'r').read().strip())
-
-
-class TestIsLxc(unittest.TestCase):
- def test_is_lxc(self):
- # Test the function runs under the current environmnet.
- # Unfortunately we can't sanely test that it is returning the
- # correct value
- helpers.is_lxc()
-
-
-if __name__ == '__main__':
- unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/test_integration.py b/charms/trusty/cassandra/tests/test_integration.py
deleted file mode 100755
index 8d91bce..0000000
--- a/charms/trusty/cassandra/tests/test_integration.py
+++ /dev/null
@@ -1,620 +0,0 @@
-#!.venv3/bin/python3
-#
-# Copyright 2015 Canonical Ltd.
-#
-# This file is part of the Cassandra Charm for Juju.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranties of
-# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
-# PURPOSE. See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import configparser
-from functools import wraps
-import glob
-import http.server
-from itertools import count
-import logging
-import multiprocessing
-import os
-import socket
-import subprocess
-import sys
-import time
-import unittest
-import uuid
-import warnings
-
-warnings.filterwarnings('ignore', 'The blist library is not available')
-
-import amulet.deployer
-import amulet.helpers
-from cassandra import Unavailable, ConsistencyLevel, AuthenticationFailed
-from cassandra.auth import PlainTextAuthProvider
-from cassandra.cluster import Cluster, NoHostAvailable
-from cassandra.query import SimpleStatement
-import yaml
-
-import helpers
-from testing.amuletfixture import AmuletFixture
-
-
-SERIES = os.environ.get('SERIES', 'trusty')
-
-WAIT_TIMEOUT = int(os.environ.get('AMULET_TIMEOUT', 3600))
-
-ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
-
-
-class TestDeploymentBase(unittest.TestCase):
- rf = 1
- deployment = None
-
- common_config = dict(max_heap_size='96M',
- heap_newsize='4M')
- test_config = dict()
-
- @classmethod
- def setUpClass(cls):
- deployment = AmuletFixture(series=SERIES)
- deployment.setUp()
- cls.deployment = deployment
-
- deployment.add('cassandra', units=cls.rf,
- constraints=dict(mem="2G"))
- deployment.expose('cassandra') # Tests need client access.
- config = dict()
- config.update(cls.common_config)
- config.update(cls.test_config) # Test subclass overrides
- deployment.configure('cassandra', config)
-
- deployment.add('storage',
- 'cs:~stub/{}/storage'.format(SERIES))
- deployment.configure('storage', dict(provider='local'))
-
- # A stub client charm.
- empty_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
- os.pardir, 'lib',
- 'testcharms', 'empty'))
- deployment.add('client', empty_path)
- deployment.relate('cassandra:database', 'client:database')
- deployment.relate('cassandra:database-admin', 'client:database-admin')
-
- # No official trusty branch of the nrpe-external-master charm, yet.
- # This is a problem as it means tests may not be running against
- # the lastest version.
- deployment.add('nrpe',
- 'cs:~stub/{}/nrpe-external-master'
- ''.format(SERIES))
- deployment.relate('cassandra:nrpe-external-master',
- 'nrpe:nrpe-external-master')
-
- deployment.deploy(timeout=WAIT_TIMEOUT)
-
- # Silence noise - we are testing the charm, not the Cassandra
- # driver.
- cassandra_log = logging.getLogger('cassandra')
- cassandra_log.setLevel(logging.CRITICAL)
-
- @classmethod
- def tearDownClass(cls):
- cls.deployment.tearDown()
- cls.deployment = None
-
- def juju_status(self):
- status_yaml = subprocess.check_output(['juju', 'status',
- '--format=yaml'])
- if not status_yaml.strip():
- return None
- return yaml.safe_load(status_yaml)
-
- def cluster(self, username=None, password=None, hosts=None, port=9042):
- status = self.juju_status()
-
- if username is None or password is None:
- # Get some valid credentials - unit's superuser account will do.
- unit = sorted(status['services']['cassandra']['units'].keys())[0]
- cqlshrc_path = helpers.get_cqlshrc_path()
- cqlshrc = configparser.ConfigParser(interpolation=None)
- cqlshrc.read_string(
- self.deployment.sentry[unit].file_contents(cqlshrc_path))
- username = cqlshrc['authentication']['username']
- password = cqlshrc['authentication']['password']
-
- auth_provider = PlainTextAuthProvider(username=username,
- password=password)
-
- if hosts is None:
- # Get the IP addresses
- hosts = []
- for unit, d in status['services']['cassandra']['units'].items():
- hosts.append(d['public-address'])
- cluster = Cluster(hosts, auth_provider=auth_provider, port=port)
- self.addCleanup(cluster.shutdown)
- return cluster
-
- def session(self):
- '''A session using the server's superuser credentials.'''
- session = self.cluster().connect()
- self.addCleanup(session.shutdown)
- return session
-
- def client_session(self, relname):
- '''A session using the client's credentials.
-
- We currently just steal the client's credentials and use
- them from the local machine, but we could tunnel through the
- client with a little more effort.
- '''
- relinfo = self.get_client_relinfo(relname)
- self.assertIn('host', relinfo.keys())
- cluster = self.cluster(relinfo['username'],
- relinfo['password'],
- [relinfo['host']],
- int(relinfo['native_transport_port']))
- session = cluster.connect()
- self.addCleanup(session.shutdown)
- return session
-
- keyspace_ids = count()
-
- def new_keyspace(self, session, rf=None):
- if rf is None:
- # We create a keyspace with a replication factor equal
- # to the number of units. This ensures that all records
- # are replicated to all nodes, and we can cofirm that
- # all nodes are working by doing an insert with
- # ConsistencyLevel.ALL.
- rf = self.rf
- keyspace = 'test{}'.format(next(TestDeploymentBase.keyspace_ids))
- q = SimpleStatement(
- 'CREATE KEYSPACE {} WITH REPLICATION ='.format(keyspace) +
- "{'class': 'SimpleStrategy', 'replication_factor': %s}",
- consistency_level=ConsistencyLevel.ALL)
- session.execute(q, (rf,))
- session.set_keyspace(keyspace)
- return keyspace
-
- def get_client_relinfo(self, relname):
- # We only need one unit, even if rf > 1
- s = self.deployment.sentry['cassandra'][0]
- relinfo = s.relation(relname, 'client:{}'.format(relname))
- return relinfo
-
- def is_port_open(self, port):
- status = self.juju_status()
- detail = list(status['services']['cassandra']['units'].values())[0]
- address = detail['public-address']
- rc = subprocess.call(['nc', '-z', '-w', '2', address, str(port)])
- return rc == 0
-
- def reconfigure_cassandra(self, **overrides):
- config = dict()
- config.update(self.common_config)
- config.update(self.test_config)
- config.update(overrides)
- self.deployment.configure('cassandra', config)
- self.deployment.wait()
-
-
-class Test1UnitDeployment(TestDeploymentBase):
- """Tests run on both a single node cluster and a 3 node cluster."""
- rf = 1
- test_config = dict(jre='openjdk')
-
- def test_basics_unit_superuser(self):
- # Basic tests using unit superuser credentials
- session = self.session()
- self.new_keyspace(session)
- self._test_database_basics(session)
-
- def test_basics_client_relation(self):
- # Create a keyspace using superuser credentials
- super_session = self.session()
- keyspace = self.new_keyspace(super_session)
-
- # Basic tests using standard client relation credentials.
- session = self.client_session('database')
- session.set_keyspace(keyspace)
- self._test_database_basics(session)
-
- def test_basics_client_admin_relation(self):
- # Basic tests using administrative client relation credentials.
- session = self.client_session('database-admin')
- self.new_keyspace(session)
- self._test_database_basics(session)
-
- def _test_database_basics(self, session):
- session.execute('CREATE TABLE Foo (x varchar PRIMARY KEY)')
-
- # Insert some data, ensuring that it has been stored on
- # all of our juju units. Note that the replication factor
- # of our keyspace has been set to the number of units we
- # deployed. Because it might take a while for the cluster to get
- # its act together, we retry this in a loop with a timeout.
- timeout = time.time() + 120
- while True:
- value = 'hello {}'.format(time.time())
- query = SimpleStatement(
- "INSERT INTO Foo (x) VALUES (%s)",
- consistency_level=ConsistencyLevel.ALL)
- try:
- session.execute(query, (value,))
- break
- except Exception:
- if time.time() > timeout:
- raise
-
- # We can get the data out again. This isn't testing our charm,
- # but nice to know anyway...
- r = session.execute('SELECT * FROM Foo LIMIT 1')
- self.assertTrue(r[0].x.startswith('hello'))
-
- def test_external_mount(self):
- # Not only does this test migrating data from local disk to an
- # external mount, it also exercises the rolling restart logic.
- # If rf==1, the restart will happen in the
- # storage-relation-changed hook as soon as the mount is ready.
- # If rf > 1, the restart will happen in the
- # cluster-relation-changed hook once the unit has determined
- # that it is its turn to restart.
-
- # First, create a keyspace pre-migration so we can confirm the
- # data was migrated rather than being reset to an empty system.
- session = self.session()
- keyspace = self.new_keyspace(session)
- session.execute('CREATE TABLE dat (x varchar PRIMARY KEY)')
- total = self.rf * 50
- q = SimpleStatement('INSERT INTO dat (x) VALUES (%s)')
- for _ in range(0, total):
- session.execute(q, (str(uuid.uuid1()),))
- session.shutdown()
-
- self.deployment.relate('cassandra:data', 'storage:data')
- self.deployment.wait()
- # Per Bug #1254766 and Bug #1254766, the sentry.wait() above
- # will return before the hooks have actually finished running
- # and data migrated. Instead, keep checking until our condition
- # is met, or a timeout reached.
- timeout = time.time() + 300
- for s in self.deployment.sentry['cassandra']:
- unit = s.info['unit_name']
- unit_num = s.info['unit']
- with self.subTest(unit=unit):
- while True:
- # Attempting to diagnose Amulet failures. I suspect
- # SSH host keys again, per Bug #802117
- try:
- s.directory_contents('/')
- except (subprocess.CalledProcessError, OSError):
- self.skipTest('sentry[{!r}].directory_contents({!r}) '
- 'failed!'.format(unit, '/'))
- parents = ['/srv', '/srv/cassandra_{}'.format(unit_num),
- '/srv/cassandra_{}/cassandra'.format(unit_num)]
- for path in parents:
- try:
- s.directory_contents('/srv')
- except (subprocess.CalledProcessError, OSError):
- raise AssertionError('Failed to scan {!r} on {}'
- .format(path, unit))
- try:
- contents = s.directory_contents(
- '/srv/cassandra_{}/cassandra/data'.format(
- unit_num))
- found = set(contents['directories'])
- self.assertIn(keyspace, found)
- self.assertIn('system', found)
- break
- except Exception:
- if time.time() > timeout:
- raise
- time.sleep(5)
-
- # Confirm no data has been lost, which could happen if we badly
- # shutdown and memtables were not flushed.
- session = self.session()
- session.set_keyspace(keyspace)
- q = SimpleStatement('SELECT COUNT(*) FROM dat',
- consistency_level=ConsistencyLevel.QUORUM)
- results = session.execute(q)
- self.assertEqual(results[0][0], total)
-
- def test_cluster_ports_closed(self):
- # The internal Cassandra ports are protected by ufw firewall
- # rules, and are closed to everyone except for peers and the
- # force_seed_nodes list. This is required for security, since
- # the protocols are unauthenticated. It also stops rogue nodes
- # on failed units from rejoining the cluster and causing chaos.
- self.assertFalse(self.is_port_open(7000), 'Storage port open')
- self.assertFalse(self.is_port_open(7001), 'SSL Storage port open')
- self.assertFalse(self.is_port_open(7199), 'JMX port open')
-
- def test_client_ports_open(self):
- self.assertTrue(self.is_port_open(9042), 'Native trans port closed')
- self.assertTrue(self.is_port_open(9160), 'Thrift RPC port closed')
-
- def test_default_superuser_account_closed(self):
- cluster = self.cluster(username='cassandra', password='cassandra')
- try:
- cluster.connect()
- self.fail('Default credentials not reset')
- except NoHostAvailable as x:
- for fail in x.errors.values():
- self.assertIsInstance(fail, AuthenticationFailed)
-
- def test_cqlsh(self):
- unit = self.deployment.sentry['cassandra'][0].info['unit_name']
- subprocess.check_output(['juju', 'ssh', unit,
- 'sudo -H cqlsh -e exit'],
- stderr=subprocess.STDOUT)
-
- def test_z_add_and_drop_node(self): # 'z' to run this test last.
- # We need to be able to add a node correctly into the ring,
- # without an operator needing to repair keyspaces to ensure data
- # is located on the expected nodes.
- # To test this, first create a keyspace with rf==1 and put enough
- # data in it so each node will have some.
- cluster = self.cluster()
- s = cluster.connect()
- keyspace = self.new_keyspace(s, rf=1)
- q = SimpleStatement('CREATE TABLE dat (x varchar PRIMARY KEY)',
- consistency_level=ConsistencyLevel.ALL)
- s.execute(q)
-
- total = self.rf * 50
- q = SimpleStatement('INSERT INTO dat (x) VALUES (%s)',
- consistency_level=ConsistencyLevel.QUORUM)
- for _ in range(0, total):
- s.execute(q, (str(uuid.uuid1()),))
- cluster.shutdown()
-
- def count(expected):
- until = time.time() + 180
- while True:
- cluster = self.cluster()
- try:
- s = cluster.connect(keyspace)
- results = s.execute(SimpleStatement(
- 'SELECT count(*) FROM dat',
- consistency_level=ConsistencyLevel.QUORUM))
- found = results[0][0]
- if found == expected or time.time() > until:
- return found
- time.sleep(0.2)
- except Unavailable:
- if time.time() > until:
- raise
- finally:
- cluster.shutdown()
-
- self.assertEqual(count(total), total)
-
- self.deployment.add_unit('cassandra')
- self.deployment.wait()
- status = self.juju_status()
- unit = sorted(status['services']['cassandra']['units'].keys())[-1]
- try:
- self.assertEqual(count(total), total)
- finally:
- # When a node is dropped, it needs to decommission itself and
- # move its data to the remaining nodes so no data is lost.
- # Alas, per Bug #1417874 we can't yet do this with Juju.
- # First, the node must be manually decommissioned before we
- # remove the unit.
- self._decommission(unit)
- self.deployment.remove_unit(unit)
- self.deployment.wait()
-
- self.assertEqual(count(total), total)
-
- def _decommission(self, unit):
- until = time.time() + WAIT_TIMEOUT
- while True:
- try:
- subprocess.check_output(['juju', 'run', '--unit', unit,
- 'nodetool decommission'],
- stderr=subprocess.STDOUT,
- universal_newlines=True)
- break
- except subprocess.CalledProcessError:
- if time.time() > until:
- raise
-
- until = time.time() + WAIT_TIMEOUT
- while True:
- try:
- cmd = ['juju', 'run', '--unit', unit, 'nodetool netstats']
- raw = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
- universal_newlines=True)
- if 'Mode: DECOMMISSIONED' in raw:
- return
- if time.time() > until:
- raise subprocess.TimeoutExpired(cmd, WAIT_TIMEOUT, raw)
- except subprocess.CalledProcessError:
- if time.time() > until:
- raise
- time.sleep(3)
-
-
-class Test3UnitDeployment(Test1UnitDeployment):
- """Tests run on a three node cluster."""
- rf = 3
-
-
-_jre_url = None
-
-
-def _serve(cwd, host, port):
- sys.stderr = open('/dev/null', 'w')
- os.chdir(cwd)
- httpd = http.server.HTTPServer((host, port),
- http.server.SimpleHTTPRequestHandler)
- httpd.serve_forever()
-
-
-_procs = []
-
-
-def get_jre_url():
- '''Return the URL to the Oracle Java SE 8 Server Runtime tarball, or None.
-
- The tarball needs to be placed in ../lib.
-
- Spawns a web server as a subprocess to serve the file.
- '''
- global _jre_url
- if _jre_url is not None:
- return _jre_url
-
- jre_dir = os.path.join(ROOT, 'lib')
-
- jre_tarballs = sorted(glob.glob(os.path.join(jre_dir,
- 'server-jre-?u*.tar.gz')))
- if not jre_tarballs:
- return None
-
- # Get the local IP address, only available via hackish means and
- # quite possibly incorrect.
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.connect(('www.canonical.com', 80))
- host = s.getsockname()[0]
- s.close()
-
- # Get a free port.
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.bind((host, 0))
- port = s.getsockname()[1]
- s.close()
-
- p = multiprocessing.Process(target=_serve, args=(jre_dir, host, port),
- daemon=True)
- p.start()
- _procs.append(p)
-
- _jre_url = 'http://{}:{}/{}'.format(host, port,
- os.path.basename(jre_tarballs[-1]))
- return _jre_url
-
-
-class TestOracleJREDeployment(Test1UnitDeployment):
- """Basic test with the Oracle JRE.
-
- Unfortunately these tests cannot be run by the automatic test runners,
- as the Oracle JRE is protected from public download by Oracle's
- click-through license agreement.
- """
- rf = 1
- test_config = dict(jre='Oracle', edition='community',
- private_jre_url=get_jre_url())
-
- @classmethod
- @unittest.skipUnless(get_jre_url(), 'No Oracle JRE tarballs available')
- def setUpClass(cls):
- super(TestOracleJREDeployment, cls).setUpClass()
-
-
-class TestDSEDeployment(Test1UnitDeployment):
- """Tests run a single node DataStax Enterprise cluster.
-
- Unfortunately these tests cannot be run by the automatic test
- runners, as the DSE packages are not available for public download.
- """
- rf = 1
- test_config = dict(
- edition='DSE', # Forces Oracle JRE
- install_sources=yaml.safe_dump([os.environ.get('DSE_SOURCE'),
- 'ppa:stub/cassandra']),
- install_keys=yaml.safe_dump([None, None]),
- private_jre_url=get_jre_url())
-
- @classmethod
- @unittest.skipUnless(get_jre_url(), 'No Oracle JRE tarballs available')
- @unittest.skipIf('DSE_SOURCE' not in os.environ,
- 'DSE_SOURCE environment variable not configured')
- def setUpClass(cls):
- super(TestDSEDeployment, cls).setUpClass()
-
-
-class TestAllowAllAuthenticatorDeployment(Test3UnitDeployment):
- test_config = dict(authenticator='AllowAllAuthenticator')
-
- def cluster(self, username=None, password=None, hosts=None, port=9042):
- '''A cluster using invalid credentials.'''
- return super(TestAllowAllAuthenticatorDeployment,
- self).cluster(username='wat', password='eva')
-
- def client_session(self, relname):
- '''A session using invalid credentials.'''
- relinfo = self.get_client_relinfo(relname)
- self.assertIn('host', relinfo.keys())
- cluster = self.cluster('random', 'nonsense',
- [relinfo['host']],
- int(relinfo['native_transport_port']))
- session = cluster.connect()
- self.addCleanup(session.shutdown)
- return session
-
- test_default_superuser_account_closed = None
-
-
-class Test20Deployment(Test1UnitDeployment):
- """Tests run on a single node Apache Cassandra 2.0 cluster.
- """
- rf = 1
- test_config = dict(
- edition='community',
- install_sources=yaml.safe_dump([
- 'ppa:stub/cassandra',
- 'ppa:openjdk-r/ppa',
- 'deb http://www.apache.org/dist/cassandra/debian 20x main']),
- install_keys=yaml.safe_dump([None, None, None]))
-
-
-class Test21Deployment(Test1UnitDeployment):
- """Tests run on a single node Apache Cassandra 2.1 cluster.
- """
- rf = 1
- test_config = dict(
- edition='community',
- install_sources=yaml.safe_dump([
- 'ppa:stub/cassandra',
- 'ppa:openjdk-r/ppa',
- 'deb http://www.apache.org/dist/cassandra/debian 21x main']),
- install_keys=yaml.safe_dump([None, None, None]))
-
-
-class Test30Deployment(Test1UnitDeployment):
- """Tests run on a single node Apache Cassandra 3.0 cluster.
- """
- rf = 1
- test_config = dict(
- edition='community',
- install_sources=yaml.safe_dump([
- 'ppa:stub/cassandra',
- 'ppa:openjdk-r/ppa',
- 'deb http://www.apache.org/dist/cassandra/debian 30x main']),
- install_keys=yaml.safe_dump([None, None, None]))
-
-
-# Bug #1417097 means we need to monkey patch Amulet for now.
-real_juju = amulet.helpers.juju
-
-
-@wraps(real_juju)
-def patched_juju(args, env=None):
- args = [str(a) for a in args]
- return real_juju(args, env)
-
-amulet.helpers.juju = patched_juju
-amulet.deployer.juju = patched_juju
-
-
-if __name__ == '__main__':
- unittest.main(verbosity=2)
diff --git a/charms/trusty/cassandra/tests/tests.yaml b/charms/trusty/cassandra/tests/tests.yaml
deleted file mode 100644
index fbbd7f0..0000000
--- a/charms/trusty/cassandra/tests/tests.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-bootstrap: true
-reset: false
-tests: ""
-virtualenv: false
-# sources: []
-# packages: []
-makefile:
- - lint
- - unittest
- - Test1UnitDeployment
- - Test3UnitDeployment
- - Test20Deployment
- - Test21Deployment
- - Test30Deployment
- - TestAllowAllAuthenticatorDeployment
diff --git a/charms/trusty/ceilometer-agent/.bzr/README b/charms/trusty/ceilometer-agent/.bzr/README
deleted file mode 100644
index f82dc1c..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a Bazaar control directory.
-Do not change any files in this directory.
-See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/ceilometer-agent/.bzr/branch-format b/charms/trusty/ceilometer-agent/.bzr/branch-format
deleted file mode 100644
index 9eb09b7..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/branch-format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/ceilometer-agent/.bzr/branch/format b/charms/trusty/ceilometer-agent/.bzr/branch/format
deleted file mode 100644
index b391ffd..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/branch/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG Branch Reference Format 1
diff --git a/charms/trusty/ceilometer-agent/.bzr/branch/location b/charms/trusty/ceilometer-agent/.bzr/branch/location
deleted file mode 100644
index c5163ec..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/branch/location
+++ /dev/null
@@ -1 +0,0 @@
-bzr+ssh://bazaar.launchpad.net/~openstack-charmers/charms/trusty/ceilometer-agent/trunk/ \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/.bzr/checkout/conflicts b/charms/trusty/ceilometer-agent/.bzr/checkout/conflicts
deleted file mode 100644
index 0dc2d3a..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/checkout/conflicts
+++ /dev/null
@@ -1 +0,0 @@
-BZR conflict list format 1
diff --git a/charms/trusty/ceilometer-agent/.bzr/checkout/dirstate b/charms/trusty/ceilometer-agent/.bzr/checkout/dirstate
deleted file mode 100644
index 8e888ce..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/checkout/dirstate
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/ceilometer-agent/.bzr/checkout/format b/charms/trusty/ceilometer-agent/.bzr/checkout/format
deleted file mode 100644
index e0261c7..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/checkout/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/ceilometer-agent/.bzr/checkout/views b/charms/trusty/ceilometer-agent/.bzr/checkout/views
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer-agent/.bzr/checkout/views
+++ /dev/null
diff --git a/charms/trusty/ceilometer-agent/.coveragerc b/charms/trusty/ceilometer-agent/.coveragerc
deleted file mode 100644
index ed08ec9..0000000
--- a/charms/trusty/ceilometer-agent/.coveragerc
+++ /dev/null
@@ -1,6 +0,0 @@
-[report]
-# Regexes for lines to exclude from consideration
-exclude_lines =
- if __name__ == .__main__.:
-include=
- hooks/ceilometer_*
diff --git a/charms/trusty/ceilometer-agent/.gitignore b/charms/trusty/ceilometer-agent/.gitignore
deleted file mode 100644
index 80007cf..0000000
--- a/charms/trusty/ceilometer-agent/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-bin
-.coverage
-.testrepository
-.tox
-tags
-*.sw[nop]
-*.pyc
-.unit-state.db
-trusty
diff --git a/charms/trusty/ceilometer-agent/.gitreview b/charms/trusty/ceilometer-agent/.gitreview
deleted file mode 100644
index 1b9d858..0000000
--- a/charms/trusty/ceilometer-agent/.gitreview
+++ /dev/null
@@ -1,5 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/charm-ceilometer-agent.git
-defaultbranch=stable/16.07
diff --git a/charms/trusty/ceilometer-agent/.project b/charms/trusty/ceilometer-agent/.project
deleted file mode 100644
index 3c0a6a8..0000000
--- a/charms/trusty/ceilometer-agent/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>ceilometer-agent</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
- <buildCommand>
- <name>org.python.pydev.PyDevBuilder</name>
- <arguments>
- </arguments>
- </buildCommand>
- </buildSpec>
- <natures>
- <nature>org.python.pydev.pythonNature</nature>
- </natures>
-</projectDescription>
diff --git a/charms/trusty/ceilometer-agent/.pydevproject b/charms/trusty/ceilometer-agent/.pydevproject
deleted file mode 100644
index ce82d9d..0000000
--- a/charms/trusty/ceilometer-agent/.pydevproject
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/ceilometer-agent/hooks</path>
-<path>/ceilometer-agent/unit_tests</path>
-</pydev_pathproperty>
-</pydev_project>
diff --git a/charms/trusty/ceilometer-agent/.testr.conf b/charms/trusty/ceilometer-agent/.testr.conf
deleted file mode 100644
index 801646b..0000000
--- a/charms/trusty/ceilometer-agent/.testr.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
- OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
- OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
- ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
-
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/charms/trusty/ceilometer-agent/LICENSE b/charms/trusty/ceilometer-agent/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/charms/trusty/ceilometer-agent/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/Makefile b/charms/trusty/ceilometer-agent/Makefile
deleted file mode 100644
index d7ad3c2..0000000
--- a/charms/trusty/ceilometer-agent/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-lint:
- @tox -e pep8
-
-test:
- @echo Starting unit tests...
- @tox -e py27
-
-functional_test:
- @echo Starting functional tests...
- @tox -e func27
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
-
-publish: lint test
- @bzr push lp:charms/ceilometer-agent
- @bzr push lp:charms/trusty/ceilometer-agent
diff --git a/charms/trusty/ceilometer-agent/README.md b/charms/trusty/ceilometer-agent/README.md
deleted file mode 100644
index 498bce1..0000000
--- a/charms/trusty/ceilometer-agent/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-Overview
---------
-
-This charm provides the Ceilometer Compute Agent for OpenStack.
-It must be deployed in each nova-compute node.
-See Ceilometer charm (lp:~yolanda.robla/charms/precise/ceilometer/trunk)
-for usage details.
diff --git a/charms/trusty/ceilometer-agent/actions.yaml b/charms/trusty/ceilometer-agent/actions.yaml
deleted file mode 100644
index 182074f..0000000
--- a/charms/trusty/ceilometer-agent/actions.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pause:
- description: Pause the ceilometer-agent unit. This action will stop ceilometer-agent services.
-resume:
- descrpition: Resume the ceilometer-agent unit. This action will start ceilometer-agent services.
-openstack-upgrade:
- description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
diff --git a/charms/trusty/ceilometer-agent/actions/actions.py b/charms/trusty/ceilometer-agent/actions/actions.py
deleted file mode 100755
index ee6a76b..0000000
--- a/charms/trusty/ceilometer-agent/actions/actions.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-sys.path.append('hooks/')
-
-from charmhelpers.core.hookenv import action_fail
-from ceilometer_utils import (
- pause_unit_helper,
- resume_unit_helper,
- register_configs,
-)
-
-
-def pause(args):
- """Pause the Ceilometer services.
- @raises Exception should the service fail to stop.
- """
- pause_unit_helper(register_configs())
-
-
-def resume(args):
- """Resume the Ceilometer services.
- @raises Exception should the service fail to start."""
- resume_unit_helper(register_configs())
-
-
-# A dictionary of all the defined actions to callables (which take
-# parsed arguments).
-ACTIONS = {"pause": pause, "resume": resume}
-
-
-def main(args):
- action_name = os.path.basename(args[0])
- try:
- action = ACTIONS[action_name]
- except KeyError:
- return "Action %s undefined" % action_name
- else:
- try:
- action(args)
- except Exception as e:
- action_fail(str(e))
-
-
-if __name__ == "__main__":
- sys.exit(main(sys.argv))
diff --git a/charms/trusty/ceilometer-agent/actions/openstack-upgrade b/charms/trusty/ceilometer-agent/actions/openstack-upgrade
deleted file mode 120000
index 6179301..0000000
--- a/charms/trusty/ceilometer-agent/actions/openstack-upgrade
+++ /dev/null
@@ -1 +0,0 @@
-openstack_upgrade.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/actions/openstack_upgrade.py b/charms/trusty/ceilometer-agent/actions/openstack_upgrade.py
deleted file mode 100755
index da6f160..0000000
--- a/charms/trusty/ceilometer-agent/actions/openstack_upgrade.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-sys.path.append('hooks/')
-
-from charmhelpers.contrib.openstack.utils import (
- do_action_openstack_upgrade,
-)
-
-from ceilometer_hooks import (
- config_changed,
- CONFIGS,
-)
-
-from ceilometer_utils import (
- do_openstack_upgrade,
-)
-
-
-def openstack_upgrade():
- """Perform action-managed OpenStack upgrade.
-
- Upgrades packages to the configured openstack-origin version and sets
- the corresponding action status as a result.
-
- If the charm was installed from source we cannot upgrade it.
- For backwards compatibility a config flag (action-managed-upgrade) must
- be set for this code to run, otherwise a full service level upgrade will
- fire on config-changed."""
-
- if (do_action_openstack_upgrade('ceilometer-common',
- do_openstack_upgrade,
- CONFIGS)):
- config_changed()
-
-if __name__ == '__main__':
- openstack_upgrade()
diff --git a/charms/trusty/ceilometer-agent/actions/pause b/charms/trusty/ceilometer-agent/actions/pause
deleted file mode 120000
index 405a394..0000000
--- a/charms/trusty/ceilometer-agent/actions/pause
+++ /dev/null
@@ -1 +0,0 @@
-actions.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/actions/resume b/charms/trusty/ceilometer-agent/actions/resume
deleted file mode 120000
index 405a394..0000000
--- a/charms/trusty/ceilometer-agent/actions/resume
+++ /dev/null
@@ -1 +0,0 @@
-actions.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/charm-helpers-hooks.yaml b/charms/trusty/ceilometer-agent/charm-helpers-hooks.yaml
deleted file mode 100644
index 34a541b..0000000
--- a/charms/trusty/ceilometer-agent/charm-helpers-hooks.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-branch: lp:~openstack-charmers/charm-helpers/stable
-destination: hooks/charmhelpers
-include:
- - core
- - cli
- - fetch
- - contrib.openstack
- - contrib.hahelpers
- - contrib.storage.linux
- - contrib.network.ip
- - contrib.python.packages
- - contrib.charmsupport
diff --git a/charms/trusty/ceilometer-agent/charm-helpers-tests.yaml b/charms/trusty/ceilometer-agent/charm-helpers-tests.yaml
deleted file mode 100644
index c469f13..0000000
--- a/charms/trusty/ceilometer-agent/charm-helpers-tests.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:~openstack-charmers/charm-helpers/stable
-destination: tests/charmhelpers
-include:
- - contrib.amulet
- - contrib.openstack.amulet
diff --git a/charms/trusty/ceilometer-agent/config.yaml b/charms/trusty/ceilometer-agent/config.yaml
deleted file mode 100644
index e276cbf..0000000
--- a/charms/trusty/ceilometer-agent/config.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-options:
- openstack-origin:
- default: distro
- type: string
- description: |
- Repository from which to install. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported Cloud Archive release pocket.
-
- Supported Cloud Archive sources include:
-
- cloud:<series>-<openstack-release>
- cloud:<series>-<openstack-release>/updates
- cloud:<series>-<openstack-release>/staging
- cloud:<series>-<openstack-release>/proposed
-
- For series=Precise we support cloud archives for openstack-release:
- * icehouse
-
- For series=Trusty we support cloud archives for openstack-release:
- * juno
- * kilo
- * ...
-
- NOTE: updating this setting to a source that is known to provide
- a later version of OpenStack will trigger a software upgrade.
- nagios_context:
- default: "juju"
- type: string
- description: |
- Used by the nrpe-external-master subordinate charm.
- A string that will be prepended to instance name to set the host name
- in nagios. So for instance the hostname would be something like:
- juju-myservice-0
- If you're running multiple environments with the same services in them
- this allows you to differentiate between them.
- nagios_servicegroups:
- default: ""
- type: string
- description: |
- A comma-separated list of nagios servicegroups.
- If left empty, the nagios_context will be used as the servicegroup
- action-managed-upgrade:
- type: boolean
- default: False
- description: |
- If True enables openstack upgrades for this charm via juju actions.
- You will still need to set openstack-origin to the new repository but
- instead of an upgrade running automatically across all units, it will
- wait for you to execute the openstack-upgrade action for this charm on
- each unit. If False it will revert to existing behavior of upgrading
- all units on config change.
- use-internal-endpoints:
- default: False
- type: boolean
- description: |
- Openstack mostly defaults to using public endpoints for
- internal communication between services. If set to True this option will
- configure services to use internal endpoints where possible.
diff --git a/charms/trusty/ceilometer-agent/copyright b/charms/trusty/ceilometer-agent/copyright
deleted file mode 100644
index 112ffbe..0000000
--- a/charms/trusty/ceilometer-agent/copyright
+++ /dev/null
@@ -1,16 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2011, Canonical Ltd., All Rights Reserved.
-License: Apache-2.0
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/ceilometer-service-relation-changed b/charms/trusty/ceilometer-agent/hooks/ceilometer-service-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/ceilometer-service-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/ceilometer_contexts.py b/charms/trusty/ceilometer-agent/hooks/ceilometer_contexts.py
deleted file mode 100644
index 6523e79..0000000
--- a/charms/trusty/ceilometer-agent/hooks/ceilometer_contexts.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import base64
-import os
-
-from charmhelpers.core.hookenv import (
- relation_ids,
- relation_get,
- related_units,
-)
-
-from charmhelpers.contrib.openstack.context import (
- OSContextGenerator,
- context_complete
-)
-
-
-class CeilometerServiceContext(OSContextGenerator):
- interfaces = ['ceilometer-service']
- keys = [
- 'debug',
- 'verbose',
- 'rabbitmq_host',
- 'rabbitmq_user',
- 'rabbitmq_password',
- 'rabbitmq_virtual_host',
- 'auth_protocol',
- 'auth_host',
- 'auth_port',
- 'admin_tenant_name',
- 'admin_user',
- 'admin_password',
- 'metering_secret'
- ]
-
- optional_keys = [
- 'rabbit_ssl_port',
- 'rabbit_ssl_ca'
- ]
-
- def __init__(self, ssl_dir=None):
- self.ssl_dir = ssl_dir
-
- def __call__(self):
- for relid in relation_ids('ceilometer-service'):
- for unit in related_units(relid):
- conf = {}
- for attr in self.keys:
- conf[attr] = relation_get(
- attr, unit=unit, rid=relid)
- if context_complete(conf):
- for attr in self.optional_keys:
- conf[attr] = relation_get(attr, unit=unit, rid=relid)
- if conf.get('rabbit_ssl_ca') is not None:
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(base64.b64decode(conf['rabbit_ssl_ca']))
- conf['rabbit_ssl_ca'] = ca_path
- return conf
- return {}
diff --git a/charms/trusty/ceilometer-agent/hooks/ceilometer_hooks.py b/charms/trusty/ceilometer-agent/hooks/ceilometer_hooks.py
deleted file mode 100755
index d41e190..0000000
--- a/charms/trusty/ceilometer-agent/hooks/ceilometer_hooks.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import json
-from charmhelpers.fetch import (
- apt_install, filter_installed_packages,
- apt_update
-)
-from charmhelpers.core.hookenv import (
- config,
- Hooks, UnregisteredHookError,
- log,
- is_relation_made,
- relation_set,
- status_set,
-)
-from charmhelpers.contrib.openstack.utils import (
- configure_installation_source,
- openstack_upgrade_available,
- pausable_restart_on_change as restart_on_change,
-)
-from ceilometer_utils import (
- restart_map,
- services,
- register_configs,
- CEILOMETER_AGENT_PACKAGES,
- NOVA_SETTINGS,
- do_openstack_upgrade,
- assess_status,
-)
-from charmhelpers.contrib.charmsupport import nrpe
-
-hooks = Hooks()
-CONFIGS = register_configs()
-
-
-@hooks.hook()
-def install():
- origin = config('openstack-origin')
- configure_installation_source(origin)
- status_set('maintenance', 'Installing apt packages')
- apt_update(fatal=True)
- apt_install(
- filter_installed_packages(CEILOMETER_AGENT_PACKAGES),
- fatal=True)
-
-
-@hooks.hook('nova-ceilometer-relation-joined')
-def nova_ceilometer_joined():
- relation_set(subordinate_configuration=json.dumps(NOVA_SETTINGS))
-
-
-@hooks.hook("ceilometer-service-relation-changed",
- "upgrade-charm")
-@restart_on_change(restart_map())
-def ceilometer_changed():
- CONFIGS.write_all()
- if is_relation_made('nrpe-external-master'):
- update_nrpe_config()
-
-
-@hooks.hook('config-changed')
-@restart_on_change(restart_map(), stopstart=True)
-def config_changed():
- if not config('action-managed-upgrade'):
- if openstack_upgrade_available('ceilometer-common'):
- status_set('maintenance', 'Running openstack upgrade')
- do_openstack_upgrade(CONFIGS)
- if is_relation_made('nrpe-external-master'):
- update_nrpe_config()
- CONFIGS.write_all()
-
-
-@hooks.hook('nrpe-external-master-relation-joined',
- 'nrpe-external-master-relation-changed')
-def update_nrpe_config():
- # python-dbus is used by check_upstart_job
- apt_install('python-dbus')
- hostname = nrpe.get_nagios_hostname()
- current_unit = nrpe.get_nagios_unit_name()
- nrpe_setup = nrpe.NRPE(hostname=hostname)
- nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
- nrpe_setup.write()
-
-
-if __name__ == '__main__':
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log('Unknown hook {} - skipping.'.format(e))
- # set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)
- assess_status(CONFIGS)
diff --git a/charms/trusty/ceilometer-agent/hooks/ceilometer_utils.py b/charms/trusty/ceilometer-agent/hooks/ceilometer_utils.py
deleted file mode 100644
index a89795c..0000000
--- a/charms/trusty/ceilometer-agent/hooks/ceilometer_utils.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.contrib.openstack import (
- context,
- templating,
-)
-from ceilometer_contexts import (
- CeilometerServiceContext,
-)
-from charmhelpers.contrib.openstack.utils import (
- get_os_codename_package,
- get_os_codename_install_source,
- configure_installation_source,
- make_assess_status_func,
- pause_unit,
- resume_unit,
-)
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-from charmhelpers.fetch import apt_update, apt_install, apt_upgrade
-
-CEILOMETER_CONF_DIR = "/etc/ceilometer"
-CEILOMETER_CONF = "%s/ceilometer.conf" % CEILOMETER_CONF_DIR
-
-CEILOMETER_AGENT_SERVICES = ['ceilometer-agent-compute']
-
-CEILOMETER_AGENT_PACKAGES = [
- 'python-ceilometer', 'ceilometer-common',
- 'ceilometer-agent-compute'
-]
-
-NOVA_CONF = "/etc/nova/nova.conf"
-
-NOVA_SETTINGS = {
- "nova": {
- "/etc/nova/nova.conf": {
- "sections": {
- "DEFAULT": [
- ('instance_usage_audit', 'True'),
- ('instance_usage_audit_period', 'hour'),
- ('notify_on_state_change', 'vm_and_task_state'),
- ('notification_driver',
- 'ceilometer.compute.nova_notifier'),
- ('notification_driver',
- 'nova.openstack.common.notifier.rpc_notifier')
- ]
- }
- }
- }
-}
-
-CONFIG_FILES = {
- CEILOMETER_CONF: {
- 'hook_contexts': [
- CeilometerServiceContext(ssl_dir=CEILOMETER_CONF_DIR),
- context.InternalEndpointContext()],
- 'services': CEILOMETER_AGENT_SERVICES
- }
-}
-
-TEMPLATES = 'templates'
-
-REQUIRED_INTERFACES = {
- 'ceilometer': ['ceilometer-service'],
-}
-
-
-def register_configs():
- """
- Register config files with their respective contexts.
- Regstration of some configs may not be required depending on
- existing of certain relations.
- """
- # if called without anything installed (eg during install hook)
- # just default to earliest supported release. configs dont get touched
- # till post-install, anyway.
- release = get_os_codename_package('ceilometer-common', fatal=False) \
- or 'icehouse'
- configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
- openstack_release=release)
-
- for conf in CONFIG_FILES:
- configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
-
- return configs
-
-
-def restart_map():
- '''
- Determine the correct resource map to be passed to
- charmhelpers.core.restart_on_change() based on the services configured.
-
- :returns: dict: A dictionary mapping config file to lists of services
- that should be restarted when file changes.
- '''
- _map = {}
- for f, ctxt in CONFIG_FILES.iteritems():
- svcs = []
- for svc in ctxt['services']:
- svcs.append(svc)
- if svcs:
- _map[f] = svcs
- return _map
-
-
-def services():
- ''' Returns a list of services associate with this charm '''
- _services = []
- for v in restart_map().values():
- _services = _services + v
- return list(set(_services))
-
-
-def do_openstack_upgrade(configs):
- """
- Perform an upgrade. Takes care of upgrading packages, rewriting
- configs, database migrations and potentially any other post-upgrade
- actions.
-
- :param configs: The charms main OSConfigRenderer object.
- """
- new_src = config('openstack-origin')
- new_os_rel = get_os_codename_install_source(new_src)
-
- log('Performing OpenStack upgrade to %s.' % (new_os_rel))
-
- configure_installation_source(new_src)
- dpkg_opts = [
- '--option', 'Dpkg::Options::=--force-confnew',
- '--option', 'Dpkg::Options::=--force-confdef',
- ]
- apt_update(fatal=True)
- apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
- apt_install(packages=CEILOMETER_AGENT_PACKAGES,
- options=dpkg_opts,
- fatal=True)
-
- # set CONFIGS to load templates from new release
- configs.set_release(openstack_release=new_os_rel)
-
-
-def assess_status(configs):
- """Assess status of current unit
- Decides what the state of the unit should be based on the current
- configuration.
- SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
- status of the unit.
- Also calls status_set(...) directly if paused state isn't complete.
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- assess_status_func(configs)()
-
-
-def assess_status_func(configs):
- """Helper function to create the function that will assess_status() for
- the unit.
- Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to
- create the appropriate status function and then returns it.
- Used directly by assess_status() and also for pausing and resuming
- the unit.
-
- NOTE(ajkavanagh) ports are not checked due to race hazards with services
- that don't behave sychronously w.r.t their service scripts. e.g.
- apache2.
- @param configs: a templating.OSConfigRenderer() object
- @return f() -> None : a function that assesses the unit's workload status
- """
- return make_assess_status_func(
- configs, REQUIRED_INTERFACES,
- services=services(), ports=None)
-
-
-def pause_unit_helper(configs):
- """Helper function to pause a unit, and then call assess_status(...) in
- effect, so that the status is correctly updated.
- Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work.
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- _pause_resume_helper(pause_unit, configs)
-
-
-def resume_unit_helper(configs):
- """Helper function to resume a unit, and then call assess_status(...) in
- effect, so that the status is correctly updated.
- Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work.
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- _pause_resume_helper(resume_unit, configs)
-
-
-def _pause_resume_helper(f, configs):
- """Helper function that uses the make_assess_status_func(...) from
- charmhelpers.contrib.openstack.utils to create an assess_status(...)
- function that can be used with the pause/resume of the unit
- @param f: the function to be used with the assess_status(...) function
- @returns None - this function is executed for its side-effect
- """
- # TODO(ajkavanagh) - ports= has been left off because of the race hazard
- # that exists due to service_start()
- f(assess_status_func(configs),
- services=services(),
- ports=None)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/__init__.py
deleted file mode 100644
index 4886788..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/__init__.py
deleted file mode 100644
index 389b490..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/__init__.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-import argparse
-import sys
-
-from six.moves import zip
-
-import charmhelpers.core.unitdata
-
-
-class OutputFormatter(object):
- def __init__(self, outfile=sys.stdout):
- self.formats = (
- "raw",
- "json",
- "py",
- "yaml",
- "csv",
- "tab",
- )
- self.outfile = outfile
-
- def add_arguments(self, argument_parser):
- formatgroup = argument_parser.add_mutually_exclusive_group()
- choices = self.supported_formats
- formatgroup.add_argument("--format", metavar='FMT',
- help="Select output format for returned data, "
- "where FMT is one of: {}".format(choices),
- choices=choices, default='raw')
- for fmt in self.formats:
- fmtfunc = getattr(self, fmt)
- formatgroup.add_argument("-{}".format(fmt[0]),
- "--{}".format(fmt), action='store_const',
- const=fmt, dest='format',
- help=fmtfunc.__doc__)
-
- @property
- def supported_formats(self):
- return self.formats
-
- def raw(self, output):
- """Output data as raw string (default)"""
- if isinstance(output, (list, tuple)):
- output = '\n'.join(map(str, output))
- self.outfile.write(str(output))
-
- def py(self, output):
- """Output data as a nicely-formatted python data structure"""
- import pprint
- pprint.pprint(output, stream=self.outfile)
-
- def json(self, output):
- """Output data in JSON format"""
- import json
- json.dump(output, self.outfile)
-
- def yaml(self, output):
- """Output data in YAML format"""
- import yaml
- yaml.safe_dump(output, self.outfile)
-
- def csv(self, output):
- """Output data as excel-compatible CSV"""
- import csv
- csvwriter = csv.writer(self.outfile)
- csvwriter.writerows(output)
-
- def tab(self, output):
- """Output data in excel-compatible tab-delimited format"""
- import csv
- csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
- csvwriter.writerows(output)
-
- def format_output(self, output, fmt='raw'):
- fmtfunc = getattr(self, fmt)
- fmtfunc(output)
-
-
-class CommandLine(object):
- argument_parser = None
- subparsers = None
- formatter = None
- exit_code = 0
-
- def __init__(self):
- if not self.argument_parser:
- self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
- if not self.formatter:
- self.formatter = OutputFormatter()
- self.formatter.add_arguments(self.argument_parser)
- if not self.subparsers:
- self.subparsers = self.argument_parser.add_subparsers(help='Commands')
-
- def subcommand(self, command_name=None):
- """
- Decorate a function as a subcommand. Use its arguments as the
- command-line arguments"""
- def wrapper(decorated):
- cmd_name = command_name or decorated.__name__
- subparser = self.subparsers.add_parser(cmd_name,
- description=decorated.__doc__)
- for args, kwargs in describe_arguments(decorated):
- subparser.add_argument(*args, **kwargs)
- subparser.set_defaults(func=decorated)
- return decorated
- return wrapper
-
- def test_command(self, decorated):
- """
- Subcommand is a boolean test function, so bool return values should be
- converted to a 0/1 exit code.
- """
- decorated._cli_test_command = True
- return decorated
-
- def no_output(self, decorated):
- """
- Subcommand is not expected to return a value, so don't print a spurious None.
- """
- decorated._cli_no_output = True
- return decorated
-
- def subcommand_builder(self, command_name, description=None):
- """
- Decorate a function that builds a subcommand. Builders should accept a
- single argument (the subparser instance) and return the function to be
- run as the command."""
- def wrapper(decorated):
- subparser = self.subparsers.add_parser(command_name)
- func = decorated(subparser)
- subparser.set_defaults(func=func)
- subparser.description = description or func.__doc__
- return wrapper
-
- def run(self):
- "Run cli, processing arguments and executing subcommands."
- arguments = self.argument_parser.parse_args()
- argspec = inspect.getargspec(arguments.func)
- vargs = []
- for arg in argspec.args:
- vargs.append(getattr(arguments, arg))
- if argspec.varargs:
- vargs.extend(getattr(arguments, argspec.varargs))
- output = arguments.func(*vargs)
- if getattr(arguments.func, '_cli_test_command', False):
- self.exit_code = 0 if output else 1
- output = ''
- if getattr(arguments.func, '_cli_no_output', False):
- output = ''
- self.formatter.format_output(output, arguments.format)
- if charmhelpers.core.unitdata._KV:
- charmhelpers.core.unitdata._KV.flush()
-
-
-cmdline = CommandLine()
-
-
-def describe_arguments(func):
- """
- Analyze a function's signature and return a data structure suitable for
- passing in as arguments to an argparse parser's add_argument() method."""
-
- argspec = inspect.getargspec(func)
- # we should probably raise an exception somewhere if func includes **kwargs
- if argspec.defaults:
- positional_args = argspec.args[:-len(argspec.defaults)]
- keyword_names = argspec.args[-len(argspec.defaults):]
- for arg, default in zip(keyword_names, argspec.defaults):
- yield ('--{}'.format(arg),), {'default': default}
- else:
- positional_args = argspec.args
-
- for arg in positional_args:
- yield (arg,), {}
- if argspec.varargs:
- yield (argspec.varargs,), {'nargs': '*'}
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/benchmark.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/benchmark.py
deleted file mode 100644
index 303af14..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/benchmark.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.contrib.benchmark import Benchmark
-
-
-@cmdline.subcommand(command_name='benchmark-start')
-def start():
- Benchmark.start()
-
-
-@cmdline.subcommand(command_name='benchmark-finish')
-def finish():
- Benchmark.finish()
-
-
-@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
-def service(subparser):
- subparser.add_argument("value", help="The composite score.")
- subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
- subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
- return Benchmark.set_composite_score
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/commands.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/commands.py
deleted file mode 100644
index b931056..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/commands.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-This module loads sub-modules into the python runtime so they can be
-discovered via the inspect module. In order to prevent flake8 from (rightfully)
-telling us these are unused modules, throw a ' # noqa' at the end of each import
-so that the warning is suppressed.
-"""
-
-from . import CommandLine # noqa
-
-"""
-Import the sub-modules which have decorated subcommands to register with chlp.
-"""
-from . import host # noqa
-from . import benchmark # noqa
-from . import unitdata # noqa
-from . import hookenv # noqa
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/hookenv.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/hookenv.py
deleted file mode 100644
index bd72f44..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/hookenv.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.core import hookenv
-
-
-cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
-cmdline.subcommand('service-name')(hookenv.service_name)
-cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/host.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/host.py
deleted file mode 100644
index 4039684..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/host.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.core import host
-
-
-@cmdline.subcommand()
-def mounts():
- "List mounts"
- return host.mounts()
-
-
-@cmdline.subcommand_builder('service', description="Control system services")
-def service(subparser):
- subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
- subparser.add_argument("service_name", help="Name of the service to control")
- return host.service
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/unitdata.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/unitdata.py
deleted file mode 100644
index c572858..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/cli/unitdata.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from . import cmdline
-from charmhelpers.core import unitdata
-
-
-@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
-def unitdata_cmd(subparser):
- nested = subparser.add_subparsers()
- get_cmd = nested.add_parser('get', help='Retrieve data')
- get_cmd.add_argument('key', help='Key to retrieve the value of')
- get_cmd.set_defaults(action='get', value=None)
- set_cmd = nested.add_parser('set', help='Store data')
- set_cmd.add_argument('key', help='Key to set')
- set_cmd.add_argument('value', help='Value to store')
- set_cmd.set_defaults(action='set')
-
- def _unitdata_cmd(action, key, value):
- if action == 'get':
- return unitdata.kv().get(key)
- elif action == 'set':
- unitdata.kv().set(key, value)
- unitdata.kv().flush()
- return ''
- return _unitdata_cmd
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index 17976fb..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,396 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Compatibility with the nrpe-external-master charm"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import subprocess
-import pwd
-import grp
-import os
-import glob
-import shutil
-import re
-import shlex
-import yaml
-
-from charmhelpers.core.hookenv import (
- config,
- local_unit,
- log,
- relation_ids,
- relation_set,
- relations_of_type,
-)
-
-from charmhelpers.core.host import service
-
-# This module adds compatibility with the nrpe-external-master and plain nrpe
-# subordinate charms. To use it in your charm:
-#
-# 1. Update metadata.yaml
-#
-# provides:
-# (...)
-# nrpe-external-master:
-# interface: nrpe-external-master
-# scope: container
-#
-# and/or
-#
-# provides:
-# (...)
-# local-monitors:
-# interface: local-monitors
-# scope: container
-
-#
-# 2. Add the following to config.yaml
-#
-# nagios_context:
-# default: "juju"
-# type: string
-# description: |
-# Used by the nrpe subordinate charms.
-# A string that will be prepended to instance name to set the host name
-# in nagios. So for instance the hostname would be something like:
-# juju-myservice-0
-# If you're running multiple environments with the same services in them
-# this allows you to differentiate between them.
-# nagios_servicegroups:
-# default: ""
-# type: string
-# description: |
-# A comma-separated list of nagios servicegroups.
-# If left empty, the nagios_context will be used as the servicegroup
-#
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
-#
-# 4. Update your hooks.py with something like this:
-#
-# from charmsupport.nrpe import NRPE
-# (...)
-# def update_nrpe_config():
-# nrpe_compat = NRPE()
-# nrpe_compat.add_check(
-# shortname = "myservice",
-# description = "Check MyService",
-# check_cmd = "check_http -w 2 -c 10 http://localhost"
-# )
-# nrpe_compat.add_check(
-# "myservice_other",
-# "Check for widget failures",
-# check_cmd = "/srv/myapp/scripts/widget_check"
-# )
-# nrpe_compat.write()
-#
-# def config_changed():
-# (...)
-# update_nrpe_config()
-#
-# def nrpe_external_master_relation_changed():
-# update_nrpe_config()
-#
-# def local_monitors_relation_changed():
-# update_nrpe_config()
-#
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
-# ln -s hooks.py local-monitors-relation-changed
-
-
-class CheckException(Exception):
- pass
-
-
-class Check(object):
- shortname_re = '[A-Za-z0-9-_]+$'
- service_template = ("""
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {{
- use active-service
- host_name {nagios_hostname}
- service_description {nagios_hostname}[{shortname}] """
- """{description}
- check_command check_nrpe!{command}
- servicegroups {nagios_servicegroup}
-}}
-""")
-
- def __init__(self, shortname, description, check_cmd):
- super(Check, self).__init__()
- # XXX: could be better to calculate this from the service name
- if not re.match(self.shortname_re, shortname):
- raise CheckException("shortname must match {}".format(
- Check.shortname_re))
- self.shortname = shortname
- self.command = "check_{}".format(shortname)
- # Note: a set of invalid characters is defined by the
- # Nagios server config
- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
- self.description = description
- self.check_cmd = self._locate_cmd(check_cmd)
-
- def _get_check_filename(self):
- return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
-
- def _get_service_filename(self, hostname):
- return os.path.join(NRPE.nagios_exportdir,
- 'service__{}_{}.cfg'.format(hostname, self.command))
-
- def _locate_cmd(self, check_cmd):
- search_path = (
- '/usr/lib/nagios/plugins',
- '/usr/local/lib/nagios/plugins',
- )
- parts = shlex.split(check_cmd)
- for path in search_path:
- if os.path.exists(os.path.join(path, parts[0])):
- command = os.path.join(path, parts[0])
- if len(parts) > 1:
- command += " " + " ".join(parts[1:])
- return command
- log('Check command not found: {}'.format(parts[0]))
- return ''
-
- def _remove_service_files(self):
- if not os.path.exists(NRPE.nagios_exportdir):
- return
- for f in os.listdir(NRPE.nagios_exportdir):
- if f.endswith('_{}.cfg'.format(self.command)):
- os.remove(os.path.join(NRPE.nagios_exportdir, f))
-
- def remove(self, hostname):
- nrpe_check_file = self._get_check_filename()
- if os.path.exists(nrpe_check_file):
- os.remove(nrpe_check_file)
- self._remove_service_files()
-
- def write(self, nagios_context, hostname, nagios_servicegroups):
- nrpe_check_file = self._get_check_filename()
- with open(nrpe_check_file, 'w') as nrpe_check_config:
- nrpe_check_config.write("# check {}\n".format(self.shortname))
- nrpe_check_config.write("command[{}]={}\n".format(
- self.command, self.check_cmd))
-
- if not os.path.exists(NRPE.nagios_exportdir):
- log('Not writing service config as {} is not accessible'.format(
- NRPE.nagios_exportdir))
- else:
- self.write_service_config(nagios_context, hostname,
- nagios_servicegroups)
-
- def write_service_config(self, nagios_context, hostname,
- nagios_servicegroups):
- self._remove_service_files()
-
- templ_vars = {
- 'nagios_hostname': hostname,
- 'nagios_servicegroup': nagios_servicegroups,
- 'description': self.description,
- 'shortname': self.shortname,
- 'command': self.command,
- }
- nrpe_service_text = Check.service_template.format(**templ_vars)
- nrpe_service_file = self._get_service_filename(hostname)
- with open(nrpe_service_file, 'w') as nrpe_service_config:
- nrpe_service_config.write(str(nrpe_service_text))
-
- def run(self):
- subprocess.call(self.check_cmd)
-
-
-class NRPE(object):
- nagios_logdir = '/var/log/nagios'
- nagios_exportdir = '/var/lib/nagios/export'
- nrpe_confdir = '/etc/nagios/nrpe.d'
-
- def __init__(self, hostname=None):
- super(NRPE, self).__init__()
- self.config = config()
- self.nagios_context = self.config['nagios_context']
- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
- self.nagios_servicegroups = self.config['nagios_servicegroups']
- else:
- self.nagios_servicegroups = self.nagios_context
- self.unit_name = local_unit().replace('/', '-')
- if hostname:
- self.hostname = hostname
- else:
- nagios_hostname = get_nagios_hostname()
- if nagios_hostname:
- self.hostname = nagios_hostname
- else:
- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
- self.checks = []
-
- def add_check(self, *args, **kwargs):
- self.checks.append(Check(*args, **kwargs))
-
- def remove_check(self, *args, **kwargs):
- if kwargs.get('shortname') is None:
- raise ValueError('shortname of check must be specified')
-
- # Use sensible defaults if they're not specified - these are not
- # actually used during removal, but they're required for constructing
- # the Check object; check_disk is chosen because it's part of the
- # nagios-plugins-basic package.
- if kwargs.get('check_cmd') is None:
- kwargs['check_cmd'] = 'check_disk'
- if kwargs.get('description') is None:
- kwargs['description'] = ''
-
- check = Check(*args, **kwargs)
- check.remove(self.hostname)
-
- def write(self):
- try:
- nagios_uid = pwd.getpwnam('nagios').pw_uid
- nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
- log("Nagios user not set up, nrpe checks not updated")
- return
-
- if not os.path.exists(NRPE.nagios_logdir):
- os.mkdir(NRPE.nagios_logdir)
- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
-
- nrpe_monitors = {}
- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
- for nrpecheck in self.checks:
- nrpecheck.write(self.nagios_context, self.hostname,
- self.nagios_servicegroups)
- nrpe_monitors[nrpecheck.shortname] = {
- "command": nrpecheck.command,
- }
-
- service('restart', 'nagios-nrpe-server')
-
- monitor_ids = relation_ids("local-monitors") + \
- relation_ids("nrpe-external-master")
- for rid in monitor_ids:
- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
-
-
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_host_context
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_host_context' in rel:
- return rel['nagios_host_context']
-
-
-def get_nagios_hostname(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_hostname
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_hostname' in rel:
- return rel['nagios_hostname']
-
-
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
- """
- Return the nagios unit name prepended with host_context if needed
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- host_context = get_nagios_hostcontext(relation_name)
- if host_context:
- unit = "%s:%s" % (host_context, local_unit())
- else:
- unit = local_unit()
- return unit
-
-
-def add_init_service_checks(nrpe, services, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param list services: List of services to check
- :param str unit_name: Unit name to use in check description
- """
- for svc in services:
- upstart_init = '/etc/init/%s.conf' % svc
- sysv_init = '/etc/init.d/%s' % svc
- if os.path.exists(upstart_init):
- # Don't add a check for these services from neutron-gateway
- if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_upstart_job %s' % svc
- )
- elif os.path.exists(sysv_init):
- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
- cron_file = ('*/5 * * * * root '
- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-s /etc/init.d/%s status > '
- '/var/lib/nagios/service-check-%s.txt\n' % (svc,
- svc)
- )
- f = open(cronpath, 'w')
- f.write(cron_file)
- f.close()
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_status_file.py -f '
- '/var/lib/nagios/service-check-%s.txt' % svc,
- )
-
-
-def copy_nrpe_checks():
- """
- Copy the nrpe checks into place
-
- """
- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
- nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
- 'charmhelpers', 'contrib', 'openstack',
- 'files')
-
- if not os.path.exists(NAGIOS_PLUGINS):
- os.makedirs(NAGIOS_PLUGINS)
- for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
- if os.path.isfile(fname):
- shutil.copy2(fname,
- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
-
-
-def add_haproxy_checks(nrpe, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param str unit_name: Unit name to use in check description
- """
- nrpe.add_check(
- shortname='haproxy_servers',
- description='Check HAProxy {%s}' % unit_name,
- check_cmd='check_haproxy.sh')
- nrpe.add_check(
- shortname='haproxy_queue',
- description='Check HAProxy queue depth {%s}' % unit_name,
- check_cmd='check_haproxy_queue_depth.sh')
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index 7ea43f0..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-'''
-Functions for managing volumes in juju units. One volume is supported per unit.
-Subordinates may have their own storage, provided it is on its own partition.
-
-Configuration stanzas::
-
- volume-ephemeral:
- type: boolean
- default: true
- description: >
- If false, a volume is mounted as sepecified in "volume-map"
- If true, ephemeral storage will be used, meaning that log data
- will only exist as long as the machine. YOU HAVE BEEN WARNED.
- volume-map:
- type: string
- default: {}
- description: >
- YAML map of units to device names, e.g:
- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
- Service units will raise a configure-error if volume-ephemeral
- is 'true' and no volume-map value is set. Use 'juju set' to set a
- value and 'juju resolved' to complete configuration.
-
-Usage::
-
- from charmsupport.volumes import configure_volume, VolumeConfigurationError
- from charmsupport.hookenv import log, ERROR
- def post_mount_hook():
- stop_service('myservice')
- def post_mount_hook():
- start_service('myservice')
-
- if __name__ == '__main__':
- try:
- configure_volume(before_change=pre_mount_hook,
- after_change=post_mount_hook)
- except VolumeConfigurationError:
- log('Storage could not be configured', ERROR)
-
-'''
-
-# XXX: Known limitations
-# - fstab is neither consulted nor updated
-
-import os
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-import yaml
-
-
-MOUNT_BASE = '/srv/juju/volumes'
-
-
-class VolumeConfigurationError(Exception):
- '''Volume configuration data is missing or invalid'''
- pass
-
-
-def get_config():
- '''Gather and sanity-check volume configuration data'''
- volume_config = {}
- config = hookenv.config()
-
- errors = False
-
- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
- volume_config['ephemeral'] = True
- else:
- volume_config['ephemeral'] = False
-
- try:
- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
- except yaml.YAMLError as e:
- hookenv.log("Error parsing YAML volume-map: {}".format(e),
- hookenv.ERROR)
- errors = True
- if volume_map is None:
- # probably an empty string
- volume_map = {}
- elif not isinstance(volume_map, dict):
- hookenv.log("Volume-map should be a dictionary, not {}".format(
- type(volume_map)))
- errors = True
-
- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
- if volume_config['device'] and volume_config['ephemeral']:
- # asked for ephemeral storage but also defined a volume ID
- hookenv.log('A volume is defined for this unit, but ephemeral '
- 'storage was requested', hookenv.ERROR)
- errors = True
- elif not volume_config['device'] and not volume_config['ephemeral']:
- # asked for permanent storage but did not define volume ID
- hookenv.log('Ephemeral storage was requested, but there is no volume '
- 'defined for this unit.', hookenv.ERROR)
- errors = True
-
- unit_mount_name = hookenv.local_unit().replace('/', '-')
- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
-
- if errors:
- return None
- return volume_config
-
-
-def mount_volume(config):
- if os.path.exists(config['mountpoint']):
- if not os.path.isdir(config['mountpoint']):
- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
- raise VolumeConfigurationError()
- else:
- host.mkdir(config['mountpoint'])
- if os.path.ismount(config['mountpoint']):
- unmount_volume(config)
- if not host.mount(config['device'], config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def unmount_volume(config):
- if os.path.ismount(config['mountpoint']):
- if not host.umount(config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def managed_mounts():
- '''List of all mounted managed volumes'''
- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
-
-
-def configure_volume(before_change=lambda: None, after_change=lambda: None):
- '''Set up storage (or don't) according to the charm's volume configuration.
- Returns the mount point or "ephemeral". before_change and after_change
- are optional functions to be called if the volume configuration changes.
- '''
-
- config = get_config()
- if not config:
- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
- raise VolumeConfigurationError()
-
- if config['ephemeral']:
- if os.path.ismount(config['mountpoint']):
- before_change()
- unmount_volume(config)
- after_change()
- return 'ephemeral'
- else:
- # persistent storage
- if os.path.ismount(config['mountpoint']):
- mounts = dict(managed_mounts())
- if mounts.get(config['mountpoint']) != config['device']:
- before_change()
- unmount_volume(config)
- mount_volume(config)
- after_change()
- else:
- before_change()
- mount_volume(config)
- after_change()
- return config['mountpoint']
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/apache.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/apache.py
deleted file mode 100644
index d0c6994..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/apache.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import os
-import subprocess
-
-from charmhelpers.core.hookenv import (
- config as config_get,
- relation_get,
- relation_ids,
- related_units as relation_list,
- log,
- INFO,
-)
-
-
-def get_cert(cn=None):
- # TODO: deal with multiple https endpoints via charm config
- cert = config_get('ssl_cert')
- key = config_get('ssl_key')
- if not (cert and key):
- log("Inspecting identity-service relations for SSL certificate.",
- level=INFO)
- cert = key = None
- if cn:
- ssl_cert_attr = 'ssl_cert_{}'.format(cn)
- ssl_key_attr = 'ssl_key_{}'.format(cn)
- else:
- ssl_cert_attr = 'ssl_cert'
- ssl_key_attr = 'ssl_key'
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if not cert:
- cert = relation_get(ssl_cert_attr,
- rid=r_id, unit=unit)
- if not key:
- key = relation_get(ssl_key_attr,
- rid=r_id, unit=unit)
- return (cert, key)
-
-
-def get_ca_cert():
- ca_cert = config_get('ssl_ca')
- if ca_cert is None:
- log("Inspecting identity-service relations for CA SSL certificate.",
- level=INFO)
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if ca_cert is None:
- ca_cert = relation_get('ca_cert',
- rid=r_id, unit=unit)
- return ca_cert
-
-
-def retrieve_ca_cert(cert_file):
- cert = None
- if os.path.isfile(cert_file):
- with open(cert_file, 'r') as crt:
- cert = crt.read()
- return cert
-
-
-def install_ca_cert(ca_cert):
- if ca_cert:
- cert_file = ('/usr/local/share/ca-certificates/'
- 'keystone_juju_ca_cert.crt')
- old_cert = retrieve_ca_cert(cert_file)
- if old_cert and old_cert == ca_cert:
- log("CA cert is the same as installed version", level=INFO)
- else:
- log("Installing new CA cert", level=INFO)
- with open(cert_file, 'w') as crt:
- crt.write(ca_cert)
- subprocess.check_call(['update-ca-certificates', '--fresh'])
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/cluster.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/cluster.py
deleted file mode 100644
index e02350e..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ /dev/null
@@ -1,363 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-"""
-Helpers for clustering and determining "cluster leadership" and other
-clustering-related helpers.
-"""
-
-import subprocess
-import os
-
-from socket import gethostname as get_unit_hostname
-
-import six
-
-from charmhelpers.core.hookenv import (
- log,
- relation_ids,
- related_units as relation_list,
- relation_get,
- config as config_get,
- INFO,
- DEBUG,
- WARNING,
- unit_get,
- is_leader as juju_is_leader,
- status_set,
-)
-from charmhelpers.core.decorators import (
- retry_on_exception,
-)
-from charmhelpers.core.strutils import (
- bool_from_string,
-)
-
-DC_RESOURCE_NAME = 'DC'
-
-
-class HAIncompleteConfig(Exception):
- pass
-
-
-class HAIncorrectConfig(Exception):
- pass
-
-
-class CRMResourceNotFound(Exception):
- pass
-
-
-class CRMDCNotFound(Exception):
- pass
-
-
-def is_elected_leader(resource):
- """
- Returns True if the charm executing this is the elected cluster leader.
-
- It relies on two mechanisms to determine leadership:
- 1. If juju is sufficiently new and leadership election is supported,
- the is_leader command will be used.
- 2. If the charm is part of a corosync cluster, call corosync to
- determine leadership.
- 3. If the charm is not part of a corosync cluster, the leader is
- determined as being "the alive unit with the lowest unit numer". In
- other words, the oldest surviving unit.
- """
- try:
- return juju_is_leader()
- except NotImplementedError:
- log('Juju leadership election feature not enabled'
- ', using fallback support',
- level=WARNING)
-
- if is_clustered():
- if not is_crm_leader(resource):
- log('Deferring action to CRM leader.', level=INFO)
- return False
- else:
- peers = peer_units()
- if peers and not oldest_peer(peers):
- log('Deferring action to oldest service unit.', level=INFO)
- return False
- return True
-
-
-def is_clustered():
- for r_id in (relation_ids('ha') or []):
- for unit in (relation_list(r_id) or []):
- clustered = relation_get('clustered',
- rid=r_id,
- unit=unit)
- if clustered:
- return True
- return False
-
-
-def is_crm_dc():
- """
- Determine leadership by querying the pacemaker Designated Controller
- """
- cmd = ['crm', 'status']
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError as ex:
- raise CRMDCNotFound(str(ex))
-
- current_dc = ''
- for line in status.split('\n'):
- if line.startswith('Current DC'):
- # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
- current_dc = line.split(':')[1].split()[0]
- if current_dc == get_unit_hostname():
- return True
- elif current_dc == 'NONE':
- raise CRMDCNotFound('Current DC: NONE')
-
- return False
-
-
-@retry_on_exception(5, base_delay=2,
- exc_type=(CRMResourceNotFound, CRMDCNotFound))
-def is_crm_leader(resource, retry=False):
- """
- Returns True if the charm calling this is the elected corosync leader,
- as returned by calling the external "crm" command.
-
- We allow this operation to be retried to avoid the possibility of getting a
- false negative. See LP #1396246 for more info.
- """
- if resource == DC_RESOURCE_NAME:
- return is_crm_dc()
- cmd = ['crm', 'resource', 'show', resource]
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError:
- status = None
-
- if status and get_unit_hostname() in status:
- return True
-
- if status and "resource %s is NOT running" % (resource) in status:
- raise CRMResourceNotFound("CRM resource %s not found" % (resource))
-
- return False
-
-
-def is_leader(resource):
- log("is_leader is deprecated. Please consider using is_crm_leader "
- "instead.", level=WARNING)
- return is_crm_leader(resource)
-
-
-def peer_units(peer_relation="cluster"):
- peers = []
- for r_id in (relation_ids(peer_relation) or []):
- for unit in (relation_list(r_id) or []):
- peers.append(unit)
- return peers
-
-
-def peer_ips(peer_relation='cluster', addr_key='private-address'):
- '''Return a dict of peers and their private-address'''
- peers = {}
- for r_id in relation_ids(peer_relation):
- for unit in relation_list(r_id):
- peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
- return peers
-
-
-def oldest_peer(peers):
- """Determines who the oldest peer is by comparing unit numbers."""
- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
- for peer in peers:
- remote_unit_no = int(peer.split('/')[1])
- if remote_unit_no < local_unit_no:
- return False
- return True
-
-
-def eligible_leader(resource):
- log("eligible_leader is deprecated. Please consider using "
- "is_elected_leader instead.", level=WARNING)
- return is_elected_leader(resource)
-
-
-def https():
- '''
- Determines whether enough data has been provided in configuration
- or relation data to configure HTTPS
- .
- returns: boolean
- '''
- use_https = config_get('use-https')
- if use_https and bool_from_string(use_https):
- return True
- if config_get('ssl_cert') and config_get('ssl_key'):
- return True
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
- rel_state = [
- relation_get('https_keystone', rid=r_id, unit=unit),
- relation_get('ca_cert', rid=r_id, unit=unit),
- ]
- # NOTE: works around (LP: #1203241)
- if (None not in rel_state) and ('' not in rel_state):
- return True
- return False
-
-
-def determine_api_port(public_port, singlenode_mode=False):
- '''
- Determine correct API server listening port based on
- existence of HTTPS reverse proxy and/or haproxy.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the API service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- if https():
- i += 1
- return public_port - (i * 10)
-
-
-def determine_apache_port(public_port, singlenode_mode=False):
- '''
- Description: Determine correct apache listening port based on public IP +
- state of the cluster.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the HAProxy service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- return public_port - (i * 10)
-
-
-def get_hacluster_config(exclude_keys=None):
- '''
- Obtains all relevant configuration from charm configuration required
- for initiating a relation to hacluster:
-
- ha-bindiface, ha-mcastport, vip, os-internal-hostname,
- os-admin-hostname, os-public-hostname, os-access-hostname
-
- param: exclude_keys: list of setting key(s) to be excluded.
- returns: dict: A dict containing settings keyed by setting name.
- raises: HAIncompleteConfig if settings are missing or incorrect.
- '''
- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
- 'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
- conf = {}
- for setting in settings:
- if exclude_keys and setting in exclude_keys:
- continue
-
- conf[setting] = config_get(setting)
-
- if not valid_hacluster_config():
- raise HAIncorrectConfig('Insufficient or incorrect config data to '
- 'configure hacluster.')
- return conf
-
-
-def valid_hacluster_config():
- '''
- Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
- must be set.
-
- Note: ha-bindiface and ha-macastport both have defaults and will always
- be set. We only care that either vip or dns-ha is set.
-
- :returns: boolean: valid config returns true.
- raises: HAIncompatibileConfig if settings conflict.
- raises: HAIncompleteConfig if settings are missing.
- '''
- vip = config_get('vip')
- dns = config_get('dns-ha')
- if not(bool(vip) ^ bool(dns)):
- msg = ('HA: Either vip or dns-ha must be set but not both in order to '
- 'use high availability')
- status_set('blocked', msg)
- raise HAIncorrectConfig(msg)
-
- # If dns-ha then one of os-*-hostname must be set
- if dns:
- dns_settings = ['os-internal-hostname', 'os-admin-hostname',
- 'os-public-hostname', 'os-access-hostname']
- # At this point it is unknown if one or all of the possible
- # network spaces are in HA. Validate at least one is set which is
- # the minimum required.
- for setting in dns_settings:
- if config_get(setting):
- log('DNS HA: At least one hostname is set {}: {}'
- ''.format(setting, config_get(setting)),
- level=DEBUG)
- return True
-
- msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
- 'DNS HA')
- status_set('blocked', msg)
- raise HAIncompleteConfig(msg)
-
- log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
- return True
-
-
-def canonical_url(configs, vip_setting='vip'):
- '''
- Returns the correct HTTP URL to this host given the state of HTTPS
- configuration and hacluster.
-
- :configs : OSTemplateRenderer: A config tempating object to inspect for
- a complete https context.
-
- :vip_setting: str: Setting in charm config that specifies
- VIP address.
- '''
- scheme = 'http'
- if 'https' in configs.complete_contexts():
- scheme = 'https'
- if is_clustered():
- addr = config_get(vip_setting)
- else:
- addr = unit_get('private-address')
- return '%s://%s' % (scheme, addr)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/ip.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index d6dee17..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,497 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network(s): %s" % network)
- raise ValueError(errmsg)
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- networks = network.split() or [network]
- for network in networks:
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
-
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
-
-
-def resolve_network_cidr(ip_address):
- '''
- Resolves the full address cidr of an ip_address based on
- configured network interfaces
- '''
- netmask = get_netmask_for_address(ip_address)
- return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any.
-
- :param iface: network interface on which address(es) are expected to
- be found.
- :param inet_type: inet address family
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :return: list of ip addresses
- """
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
-
- :param iface: network interface on which ipv6 address(es) are expected to
- be found.
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :param dynamic_only: only recognise dynamic addresses
- :return: list of ipv6 addresses
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4/IPv6 address
- address = netaddr.IPAddress(address)
- return True
- except netaddr.AddrFormatError:
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- apt_install('python-dnspython', fatal=True)
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- answers = dns.resolver.query(address, rtype)
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- apt_install("python-dnspython", fatal=True)
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
-
-
-def port_has_listener(address, port):
- """
- Returns True if the address:port is open and being listened to,
- else False.
-
- @param address: an IP address or hostname
- @param port: integer port
-
- Note calls 'zc' via a subprocess shell
- """
- cmd = ['nc', '-z', address, str(port)]
- result = subprocess.call(cmd)
- return not(bool(result))
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index 1501641..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 6ce91db..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the ~openstack-charmers
- base_charms = {
- 'mysql': ['precise', 'trusty'],
- 'mongodb': ['precise', 'trusty'],
- 'nrpe': ['precise', 'trusty', 'wily', 'xenial'],
- }
-
- for svc in other_services:
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if svc['name'] in base_charms:
- # NOTE: not all charms have support for all series we
- # want/need to test against, so fix to most recent
- # that each base charm supports
- target_series = self.series
- if self.series not in base_charms[svc['name']]:
- target_series = base_charms[svc['name']][-1]
- svc['location'] = 'cs:{}/{}'.format(target_series,
- svc['name'])
- elif self.stable:
- svc['location'] = 'cs:{}/{}'.format(self.series,
- svc['name'])
- else:
- svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
- self.series,
- svc['name']
- )
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index 8040b57..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1010 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index 76737f2..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1508 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import glob
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call, CalledProcessError
-
-import six
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
- status_set,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-from charmhelpers.contrib.openstack.exceptions import OSContextError
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
- pwgen,
- lsb_release,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import (
- config_flags_parser,
- get_host_ip,
-)
-from charmhelpers.core.unitdata import kv
-
-try:
- import psutil
-except ImportError:
- apt_install('python-psutil', fatal=True)
- import psutil
-
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- api_version = rdata.get('api_version') or '2.0'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol,
- 'api_version': api_version})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- unit_priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- ceph_addr = ceph_pub_addr or unit_priv_addr
- ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
- mon_hosts.append(ceph_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False):
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = {}
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in ADDRESS_TYPES:
- cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
- # match in the frontend
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('haproxy-queue-timeout'):
- ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
-
- if config('haproxy-connect-timeout'):
- ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
-
- if config('prefer-ipv6'):
- ctxt['ipv6'] = True
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
-
- ctxt['stat_port'] = '8888'
-
- db = kv()
- ctxt['stat_password'] = db.get('stat-password')
- if not ctxt['stat_password']:
- ctxt['stat_password'] = db.set('stat-password',
- pwgen(32))
- db.flush()
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of a tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert))
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key))
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- cns.append(k.lstrip('ssl_key_'))
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and vip
- (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- if config('vip'):
- vips = config('vip').split()
- else:
- vips = []
-
- for net_type in ['os-internal-network', 'os-admin-network',
- 'os-public-network']:
- addr = get_address_in_network(config(net_type),
- unit_get('private-address'))
- if len(vips) > 1 and is_clustered():
- if not config(net_type):
- log("Multiple networks configured but net_type "
- "is None (%s)." % net_type, level=WARNING)
- continue
-
- for vip in vips:
- if is_address_in_network(config(net_type), vip):
- addresses.append((addr, vip))
- break
-
- elif is_clustered() and config('vip'):
- addresses.append((addr, config('vip')))
- else:
- addresses.append((addr, addr))
-
- return sorted(addresses)
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- self.configure_ca()
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in sorted(set(addresses)):
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def midonet_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- midonet_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- mido_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'midonet',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': midonet_config}
-
- return mido_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
- elif self.plugin == 'midonet':
- ctxt.update(self.midonet_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- self._save_flag_file()
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT hace an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-seperated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some Openstack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
-
-
-class LibvirtConfigFlagsContext(OSContextGenerator):
- """
- This context provides support for extending
- the libvirt section through user-defined flags.
- """
- def __call__(self):
- ctxt = {}
- libvirt_flags = config('libvirt-flags')
- if libvirt_flags:
- ctxt['libvirt_flags'] = config_flags_parser(
- libvirt_flags)
- return ctxt
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principle for multiple config
- files and multiple serivces. Ie, a subordinate that has interfaces
- to both glance and nova may export to following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principle charms to subscribe this context to
- the service+config file it is interestd in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_configuration': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except:
- log('Could not parse JSON from '
- 'subordinate_configuration setting from %s'
- % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s service'
- % (rid, service), level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s'
- % (rid, self.config_file), level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- @property
- def num_cpus(self):
- # NOTE: use cpu_count if present (16.04 support)
- if hasattr(psutil, 'cpu_count'):
- return psutil.cpu_count()
- else:
- return psutil.NUM_CPUS
-
- def __call__(self):
- multiplier = config('worker-multiplier') or 0
- count = int(self.num_cpus * multiplier)
- if multiplier > 0 and count == 0:
- count = 1
- ctxt = {"workers": count}
- return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects current neutron-plugin-api relation for neutron settings. Return
- defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {normalized[port]: bridge for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.keys():
- ports = sorted(mappings.keys())
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- all_ports = set()
- # If any of ports is a vlan device, its underlying device must have
- # mtu applied first.
- for port in ports:
- for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
- lport = os.path.basename(lport)
- all_ports.add(lport.split('_')[1])
-
- all_ports = list(all_ports)
- all_ports.extend(ports)
- if mtu:
- ctxt["devs"] = '\\n'.join(all_ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- 'api_version':
- rdata.get('api_version') or '2.0',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
-
-
-class InternalEndpointContext(OSContextGenerator):
- """Internal endpoint context.
-
- This context provides the endpoint type used for communication between
- services e.g. between Nova and Cinder internally. Openstack uses Public
- endpoints by default so this allows admins to optionally use internal
- endpoints.
- """
- def __call__(self):
- return {'use_internal_endpoints': config('use-internal-endpoints')}
-
-
-class AppArmorContext(OSContextGenerator):
- """Base class for apparmor contexts."""
-
- def __init__(self):
- self._ctxt = None
- self.aa_profile = None
- self.aa_utils_packages = ['apparmor-utils']
-
- @property
- def ctxt(self):
- if self._ctxt is not None:
- return self._ctxt
- self._ctxt = self._determine_ctxt()
- return self._ctxt
-
- def _determine_ctxt(self):
- """
- Validate aa-profile-mode settings is disable, enforce, or complain.
-
- :return ctxt: Dictionary of the apparmor profile or None
- """
- if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
- ctxt = {'aa_profile_mode': config('aa-profile-mode'),
- 'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
- else:
- ctxt = None
- return ctxt
-
- def __call__(self):
- return self.ctxt
-
- def install_aa_utils(self):
- """
- Install packages required for apparmor configuration.
- """
- log("Installing apparmor utils.")
- ensure_packages(self.aa_utils_packages)
-
- def manually_disable_aa_profile(self):
- """
- Manually disable an apparmor profile.
-
- If aa-profile-mode is set to disabled (default) this is required as the
- template has been written but apparmor is yet unaware of the profile
- and aa-disable aa-profile fails. Without this the profile would kick
- into enforce mode on the next service restart.
-
- """
- profile_path = '/etc/apparmor.d'
- disable_path = '/etc/apparmor.d/disable'
- if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
- os.symlink(os.path.join(profile_path, self.aa_profile),
- os.path.join(disable_path, self.aa_profile))
-
- def setup_aa_profile(self):
- """
- Setup an apparmor profile.
- The ctxt dictionary will contain the apparmor profile mode and
- the apparmor profile name.
- Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
- the apparmor profile.
- """
- self()
- if not self.ctxt:
- log("Not enabling apparmor Profile")
- return
- self.install_aa_utils()
- cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])]
- cmd.append(self.ctxt['aa_profile'])
- log("Setting up the apparmor profile for {} in {} mode."
- "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode']))
- try:
- check_call(cmd)
- except CalledProcessError as e:
- # If aa-profile-mode is set to disabled (default) manual
- # disabling is required as the template has been written but
- # apparmor is yet unaware of the profile and aa-disable aa-profile
- # fails. If aa-disable learns to read profile files first this can
- # be removed.
- if self.ctxt['aa_profile_mode'] == 'disable':
- log("Manually disabling the apparmor profile for {}."
- "".format(self.ctxt['aa_profile']))
- self.manually_disable_aa_profile()
- return
- status_set('blocked', "Apparmor profile {} failed to be set to {}."
- "".format(self.ctxt['aa_profile'],
- self.ctxt['aa_profile_mode']))
- raise e
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py
deleted file mode 100644
index f85ae4f..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/exceptions.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-class OSContextError(Exception):
- """Raised when an error occurs during context generation.
-
- This exception is principally used in contrib.openstack.context
- """
- pass
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py
deleted file mode 100644
index 9b088de..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py
deleted file mode 100644
index 1f5310b..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2014-2016 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2016 Canonical Ltd.
-#
-# Authors:
-# Openstack Charmers <
-#
-
-"""
-Helpers for high availability.
-"""
-
-import re
-
-from charmhelpers.core.hookenv import (
- log,
- relation_set,
- charm_name,
- config,
- status_set,
- DEBUG,
-)
-
-from charmhelpers.core.host import (
- lsb_release
-)
-
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
-)
-
-
-class DNSHAException(Exception):
- """Raised when an error occurs setting up DNS HA
- """
-
- pass
-
-
-def update_dns_ha_resource_params(resources, resource_params,
- relation_id=None,
- crm_ocf='ocf:maas:dns'):
- """ Check for os-*-hostname settings and update resource dictionaries for
- the HA relation.
-
- @param resources: Pointer to dictionary of resources.
- Usually instantiated in ha_joined().
- @param resource_params: Pointer to dictionary of resource parameters.
- Usually instantiated in ha_joined()
- @param relation_id: Relation ID of the ha relation
- @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
- DNS HA
- """
-
- # Validate the charm environment for DNS HA
- assert_charm_supports_dns_ha()
-
- settings = ['os-admin-hostname', 'os-internal-hostname',
- 'os-public-hostname', 'os-access-hostname']
-
- # Check which DNS settings are set and update dictionaries
- hostname_group = []
- for setting in settings:
- hostname = config(setting)
- if hostname is None:
- log('DNS HA: Hostname setting {} is None. Ignoring.'
- ''.format(setting),
- DEBUG)
- continue
- m = re.search('os-(.+?)-hostname', setting)
- if m:
- networkspace = m.group(1)
- else:
- msg = ('Unexpected DNS hostname setting: {}. '
- 'Cannot determine network space name'
- ''.format(setting))
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
- hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
- if hostname_key in hostname_group:
- log('DNS HA: Resource {}: {} already exists in '
- 'hostname group - skipping'.format(hostname_key, hostname),
- DEBUG)
- continue
-
- hostname_group.append(hostname_key)
- resources[hostname_key] = crm_ocf
- resource_params[hostname_key] = (
- 'params fqdn="{}" ip_address="{}" '
- ''.format(hostname, resolve_address(endpoint_type=networkspace,
- override=False)))
-
- if len(hostname_group) >= 1:
- log('DNS HA: Hostname group is set with {} as members. '
- 'Informing the ha relation'.format(' '.join(hostname_group)),
- DEBUG)
- relation_set(relation_id=relation_id, groups={
- 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
- else:
- msg = 'DNS HA: Hostname group has no members.'
- status_set('blocked', msg)
- raise DNSHAException(msg)
-
-
-def assert_charm_supports_dns_ha():
- """Validate prerequisites for DNS HA
- The MAAS client is only available on Xenial or greater
- """
- if lsb_release().get('DISTRIB_RELEASE') < '16.04':
- msg = ('DNS HA is only supported on 16.04 and greater '
- 'versions of Ubuntu.')
- status_set('blocked', msg)
- raise DNSHAException(msg)
- return True
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 0fd3ac2..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
- network_get_primary_address,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
- resolve_network_cidr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'binding': 'public',
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'binding': 'internal',
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'binding': 'admin',
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC, override=True):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured, or a Juju 2.0 extra-binding has been used.
-
- :param endpoint_type: Network endpoing type
- :param override: Accept hostname overrides or not
- """
- resolved_address = None
- if override:
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- binding = ADDRESS_MAP[endpoint_type]['binding']
- clustered = is_clustered()
-
- if clustered and vips:
- if net_addr:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- # NOTE: endeavour to check vips against network space
- # bindings
- try:
- bound_cidr = resolve_network_cidr(
- network_get_primary_address(binding)
- )
- for vip in vips:
- if is_address_in_network(bound_cidr, vip):
- resolved_address = vip
- break
- except NotImplementedError:
- # If no net-splits configured and no support for extra
- # bindings/network spaces so we expect a single vip
- resolved_address = vips[0]
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- if net_addr:
- resolved_address = get_address_in_network(net_addr, fallback_addr)
- else:
- # NOTE: only try to use extra bindings if legacy network
- # configuration is not in use
- try:
- resolved_address = network_get_primary_address(binding)
- except NotImplementedError:
- resolved_address = fallback_addr
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 03427b4..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return [headers_package(), 'openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': ['plumgrid-lxc',
- 'iovisor-dkms'],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- },
- 'midonet': {
- 'config': '/etc/neutron/plugins/midonet/midonet.ini',
- 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package()],
- 'server_packages': ['neutron-server',
- 'python-neutron-plugin-midonet'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- if release >= 'kilo':
- plugins['midonet']['driver'] = (
- 'neutron.plugins.midonet.plugin.MidonetPluginV2')
- if release >= 'liberty':
- plugins['midonet']['driver'] = (
- 'midonet.neutron.plugin_v1.MidonetPluginV2')
- plugins['midonet']['server_packages'].remove(
- 'python-neutron-plugin-midonet')
- plugins['midonet']['server_packages'].append(
- 'python-networking-midonet')
- plugins['plumgrid']['driver'] = (
- 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
- plugins['plumgrid']['server_packages'].remove(
- 'neutron-plugin-plumgrid')
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of bridge:port.
-
- Returns dict of the form {port:bridge} where ports may be mac addresses or
- interface names.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 9df5f74..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index 8958895..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,321 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 9d3e3d8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,1891 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-import itertools
-import functools
-import shutil
-
-import six
-import tempfile
-import traceback
-import uuid
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- action_fail,
- action_set,
- config,
- log as juju_log,
- charm_dir,
- DEBUG,
- INFO,
- ERROR,
- related_units,
- relation_ids,
- relation_set,
- service_name,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr,
- is_ipv6,
- port_has_listener,
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import (
- lsb_release,
- mounts,
- umount,
- service_running,
- service_pause,
- service_resume,
- restart_on_change_helper,
-)
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-from charmhelpers.contrib.openstack.exceptions import OSContextError
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ('yakkety', 'newton'),
- ('zebra', 'ocata'), # TODO: upload with real Z name
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
- ('2016.1', 'mitaka'),
- ('2016.2', 'newton'),
- ('2017.1', 'ocata'),
-])
-
-# The ugly duckling - must list releases oldest to newest
-SWIFT_CODENAMES = OrderedDict([
- ('diablo',
- ['1.4.3']),
- ('essex',
- ['1.4.8']),
- ('folsom',
- ['1.7.4']),
- ('grizzly',
- ['1.7.6', '1.7.7', '1.8.0']),
- ('havana',
- ['1.9.0', '1.9.1', '1.10.0']),
- ('icehouse',
- ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
- ('juno',
- ['2.0.0', '2.1.0', '2.2.0']),
- ('kilo',
- ['2.2.1', '2.2.2']),
- ('liberty',
- ['2.3.0', '2.4.0', '2.5.0']),
- ('mitaka',
- ['2.5.0', '2.6.0', '2.7.0']),
- ('newton',
- ['2.8.0', '2.9.0']),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12', 'liberty'),
- ('13', 'mitaka'),
- ('14', 'newton'),
- ('15', 'ocata'),
- ]),
- 'neutron-common': OrderedDict([
- ('7', 'liberty'),
- ('8', 'mitaka'),
- ('9', 'newton'),
- ('10', 'ocata'),
- ]),
- 'cinder-common': OrderedDict([
- ('7', 'liberty'),
- ('8', 'mitaka'),
- ('9', 'newton'),
- ('10', 'ocata'),
- ]),
- 'keystone': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ]),
- 'horizon-common': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5', 'liberty'),
- ('6', 'mitaka'),
- ('7', 'newton'),
- ('8', 'ocata'),
- ]),
- 'heat-common': OrderedDict([
- ('5', 'liberty'),
- ('6', 'mitaka'),
- ('7', 'newton'),
- ('8', 'ocata'),
- ]),
- 'glance-common': OrderedDict([
- ('11', 'liberty'),
- ('12', 'mitaka'),
- ('13', 'newton'),
- ('14', 'ocata'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8', 'liberty'),
- ('9', 'mitaka'),
- ('10', 'newton'),
- ('11', 'ocata'),
- ]),
-}
-
-GIT_DEFAULT_REPOS = {
- 'requirements': 'git://github.com/openstack/requirements',
- 'cinder': 'git://github.com/openstack/cinder',
- 'glance': 'git://github.com/openstack/glance',
- 'horizon': 'git://github.com/openstack/horizon',
- 'keystone': 'git://github.com/openstack/keystone',
- 'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
- 'neutron': 'git://github.com/openstack/neutron',
- 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
- 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
- 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
- 'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
- 'liberty': 'stable/liberty',
- 'mitaka': 'stable/mitaka',
- 'master': 'master',
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_version_codename_swift(codename):
- '''Determine OpenStack version number of swift from codename.'''
- for k, v in six.iteritems(SWIFT_CODENAMES):
- if k == codename:
- return v[-1]
- e = 'Could not derive swift version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_swift_codename(version):
- '''Determine OpenStack codename that corresponds to swift version.'''
- codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
-
- if len(codenames) > 1:
- # If more than one release codename contains this version we determine
- # the actual codename based on the highest available install source.
- for codename in reversed(codenames):
- releases = UBUNTU_OPENSTACK_RELEASE
- release = [k for k, v in six.iteritems(releases) if codename in v]
- ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
- if codename in ret or release[0] in ret:
- return codename
- elif len(codenames) == 1:
- return codenames[0]
-
- # NOTE: fallback - attempt to match with just major.minor version
- match = re.match('^(\d+)\.(\d+)', version)
- if match:
- major_minor_version = match.group(0)
- for codename, versions in six.iteritems(SWIFT_CODENAMES):
- for release_version in versions:
- if release_version.startswith(major_minor_version):
- return codename
-
- return None
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- if 'swift' in pkg.name:
- # Fully x.y.z match for swift versions
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- else:
- # x.y match only for 20XX.X
- # and ignore patch level for other packages
- match = re.match('^(\d+)\.(\d+)', vers)
-
- if match:
- vers = match.group(0)
-
- # Generate a major version number for newer semantic
- # versions of openstack projects
- major_vers = vers.split('.')[0]
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- major_vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][major_vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- return get_swift_codename(vers)
- else:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- for cname, version in six.iteritems(vers_map):
- if cname == codename:
- return version[-1]
- else:
- vers_map = OPENSTACK_CODENAMES
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (git_os_codename_install_source(config('openstack-origin-git')) or
- get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
-def import_key(keyid):
- key = keyid.strip()
- if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
- key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
- juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
- juju_log("Importing ASCII Armor PGP key", level=DEBUG)
- with tempfile.NamedTemporaryFile() as keyfile:
- with open(keyfile.name, 'w') as fd:
- fd.write(key)
- fd.write("\n")
-
- cmd = ['apt-key', 'add', keyfile.name]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error_out("Error importing PGP key '%s'" % key)
- else:
- juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
- juju_log("Importing PGP key from keyserver", level=DEBUG)
- cmd = ['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error_out("Error importing PGP key '%s'" % key)
-
-
-def get_source_and_pgp_key(input):
- """Look for a pgp key ID or ascii-armor key in the given input."""
- index = input.strip()
- index = input.rfind('|')
- if index < 0:
- return input, None
-
- key = input[index + 1:].strip('|')
- source = input[:index]
- return source, key
-
-
-def configure_installation_source(rel):
- '''Configure apt installation source.'''
- if rel == 'distro':
- return
- elif rel == 'distro-proposed':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(DISTRO_PROPOSED % ubuntu_rel)
- elif rel[:4] == "ppa:":
- src, key = get_source_and_pgp_key(rel)
- if key:
- import_key(key)
-
- subprocess.check_call(["add-apt-repository", "-y", src])
- elif rel[:3] == "deb":
- src, key = get_source_and_pgp_key(rel)
- if key:
- import_key(key)
-
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(src)
- elif rel[:6] == 'cloud:':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = rel.split(':')[1]
- u_rel = rel.split('-')[0]
- ca_rel = rel.split('-')[1]
-
- if u_rel != ubuntu_rel:
- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
- 'version (%s)' % (ca_rel, ubuntu_rel)
- error_out(e)
-
- if 'staging' in ca_rel:
- # staging is just a regular PPA.
- os_rel = ca_rel.split('/')[0]
- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
- cmd = 'add-apt-repository -y %s' % ppa
- subprocess.check_call(cmd.split(' '))
- return
-
- # map charm config options to actual archive pockets.
- pockets = {
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'newton': 'xenial-updates/newton',
- 'newton/updates': 'xenial-updates/newton',
- 'newton/proposed': 'xenial-proposed/newton',
- }
-
- try:
- pocket = pockets[ca_rel]
- except KeyError:
- e = 'Invalid Cloud Archive release specified: %s' % rel
- error_out(e)
-
- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
- apt_install('ubuntu-cloud-keyring', fatal=True)
-
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
- f.write(src)
- else:
- error_out("Invalid openstack-release specified: %s" % rel)
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- [rc_script.write('export %s=%s\n' % (u, p))
- for u, p in six.iteritems(env_vars) if u != "script_path"]
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: : Returns True if configured installation source offers
- a newer version of package.
-
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- avail_vers = get_os_version_codename_swift(codename)
- else:
- avail_vers = get_os_version_install_source(src)
- apt.init()
- if "swift" in package:
- major_cur_vers = cur_vers.split('.', 1)[0]
- major_avail_vers = avail_vers.split('.', 1)[0]
- major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
- return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
- return apt.version_compare(avail_vers, cur_vers) == 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('prepare_storage(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- if config('vip'):
- vips = config('vip').split()
- for vip in vips:
- if vip and is_ipv6(vip):
- hosts.append(vip)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
-
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
- """
- Returns OpenStack codename of release being installed from source.
- """
- if git_install_requested():
- projects = _git_yaml_load(projects_yaml)
-
- if projects in GIT_DEFAULT_BRANCHES.keys():
- if projects == 'master':
- return 'newton'
- return projects
-
- if 'release' in projects:
- if projects['release'] == 'master':
- return 'newton'
- return projects['release']
-
- return None
-
-
-def git_default_repos(projects_yaml):
- """
- Returns default repos if a default openstack-origin-git value is specified.
- """
- service = service_name()
- core_project = service
-
- for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
- if projects_yaml == default:
-
- # add the requirements repo first
- repo = {
- 'name': 'requirements',
- 'repository': GIT_DEFAULT_REPOS['requirements'],
- 'branch': branch,
- }
- repos = [repo]
-
- # neutron-* and nova-* charms require some additional repos
- if service in ['neutron-api', 'neutron-gateway',
- 'neutron-openvswitch']:
- core_project = 'neutron'
- if service == 'neutron-api':
- repo = {
- 'name': 'networking-hyperv',
- 'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
- 'branch': branch,
- }
- repos.append(repo)
- for project in ['neutron-fwaas', 'neutron-lbaas',
- 'neutron-vpnaas', 'nova']:
- repo = {
- 'name': project,
- 'repository': GIT_DEFAULT_REPOS[project],
- 'branch': branch,
- }
- repos.append(repo)
-
- elif service in ['nova-cloud-controller', 'nova-compute']:
- core_project = 'nova'
- repo = {
- 'name': 'neutron',
- 'repository': GIT_DEFAULT_REPOS['neutron'],
- 'branch': branch,
- }
- repos.append(repo)
- elif service == 'openstack-dashboard':
- core_project = 'horizon'
-
- # finally add the current service's core project repo
- repo = {
- 'name': core_project,
- 'repository': GIT_DEFAULT_REPOS[core_project],
- 'branch': branch,
- }
- repos.append(repo)
-
- return yaml.dump(dict(repositories=repos, release=default))
-
- return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- constraints = None
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- depth = '1'
- if 'depth' in p.keys():
- depth = p['depth']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- constraints = os.path.join(repo_dir, "upper-constraints.txt")
- # upper-constraints didn't exist until after icehouse
- if not os.path.isfile(constraints):
- constraints = None
- # use constraints unless project yaml sets use_constraints to false
- if 'use_constraints' in projects.keys():
- if not projects['use_constraints']:
- constraints = None
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True,
- constraints=constraints)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
- _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements, constraints=None):
- """
- Clone and install a single git repository.
- """
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(
- repo, dest=parent_dir, branch=branch, depth=depth)
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv,
- constraints=constraints)
- else:
- pip_install(repo_dir, venv=venv, constraints=constraints)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def git_generate_systemd_init_files(templates_dir):
- """
- Generate systemd init files.
-
- Generates and installs systemd init units and script files based on the
- *.init.in files contained in the templates_dir directory.
-
- This code is based on the openstack-pkg-tools package and its init
- script generation, which is used by the OpenStack packages.
- """
- for f in os.listdir(templates_dir):
- # Create the init script and systemd unit file from the template
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- init_source = os.path.join(templates_dir, init_file)
- service_source = os.path.join(templates_dir, service_file)
-
- init_dest = os.path.join('/etc/init.d', init_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(init_in_source, init_source)
- with open(init_source, 'a') as outfile:
- template = '/usr/share/openstack-pkg-tools/init-script-template'
- with open(template) as infile:
- outfile.write('\n\n{}'.format(infile.read()))
-
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(init_dest):
- os.remove(init_dest)
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(init_source, init_dest)
- shutil.copyfile(service_source, service_dest)
- os.chmod(init_dest, 0o755)
-
- for f in os.listdir(templates_dir):
- # If there's a service.in file, use it instead of the generated one
- if f.endswith(".service.in"):
- service_in_file = f
- service_file = f[:-3]
-
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(service_in_source, service_source)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
- for f in os.listdir(templates_dir):
- # Generate the systemd unit if there's no existing .service.in
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_in_file = "{}.service.in".format(init_file)
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- if not os.path.exists(service_in_source):
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Set the state of the workload status for the charm.
-
- This calls _determine_os_workload_status() to get the new state, message
- and sets the status using status_set()
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _determine_os_workload_status(
- configs, required_interfaces, charm_func, services, ports)
- status_set(state, message)
-
-
-def _determine_os_workload_status(
- configs, required_interfaces, charm_func=None,
- services=None, ports=None):
- """Determine the state of the workload status for the charm.
-
- This function returns the new workload status for the charm based
- on the state of the interfaces, the paused state and whether the
- services are actually running and any specified ports are open.
-
- This checks:
-
- 1. if the unit should be paused, that it is actually paused. If so the
- state is 'maintenance' + message, else 'broken'.
- 2. that the interfaces/relations are complete. If they are not then
- it sets the state to either 'broken' or 'waiting' and an appropriate
- message.
- 3. If all the relation data is set, then it checks that the actual
- services really are running. If not it sets the state to 'broken'.
-
- If everything is okay then the state returns 'active'.
-
- @param configs: a templating.OSConfigRenderer() object
- @param required_interfaces: {generic: [specific, specific2, ...]}
- @param charm_func: a callable function that returns state, message. The
- signature is charm_func(configs) -> (state, message)
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: OPTIONAL list of port numbers.
- @returns state, message: the new workload status, user message
- """
- state, message = _ows_check_if_paused(services, ports)
-
- if state is None:
- state, message = _ows_check_generic_interfaces(
- configs, required_interfaces)
-
- if state != 'maintenance' and charm_func:
- # _ows_check_charm_func() may modify the state, message
- state, message = _ows_check_charm_func(
- state, message, lambda: charm_func(configs))
-
- if state is None:
- state, message = _ows_check_services_running(services, ports)
-
- if state is None:
- state = 'active'
- message = "Unit is ready"
- juju_log(message, 'INFO')
-
- return state, message
-
-
-def _ows_check_if_paused(services=None, ports=None):
- """Check if the unit is supposed to be paused, and if so check that the
- services/ports (if passed) are actually stopped/not being listened to.
-
- if the unit isn't supposed to be paused, just return None, None
-
- @param services: OPTIONAL services spec or list of service names.
- @param ports: OPTIONAL list of port numbers.
- @returns state, message or None, None
- """
- if is_unit_paused_set():
- state, message = check_actually_paused(services=services,
- ports=ports)
- if state is None:
- # we're paused okay, so set maintenance and return
- state = "maintenance"
- message = "Paused. Use 'resume' action to resume normal service."
- return state, message
- return None, None
-
-
-def _ows_check_generic_interfaces(configs, required_interfaces):
- """Check the complete contexts to determine the workload status.
-
- - Checks for missing or incomplete contexts
- - juju log details of missing required data.
- - determines the correct workload status
- - creates an appropriate message for status_set(...)
-
- if there are no problems then the function returns None, None
-
- @param configs: a templating.OSConfigRenderer() object
- @params required_interfaces: {generic_interface: [specific_interface], }
- @returns state, message or None, None
- """
- incomplete_rel_data = incomplete_relation_data(configs,
- required_interfaces)
- state = None
- message = None
- missing_relations = set()
- incomplete_relations = set()
-
- for generic_interface, relations_states in incomplete_rel_data.items():
- related_interface = None
- missing_data = {}
- # Related or not?
- for interface, relation_state in relations_states.items():
- if relation_state.get('related'):
- related_interface = interface
- missing_data = relation_state.get('missing_data')
- break
- # No relation ID for the generic_interface?
- if not related_interface:
- juju_log("{} relation is missing and must be related for "
- "functionality. ".format(generic_interface), 'WARN')
- state = 'blocked'
- missing_relations.add(generic_interface)
- else:
- # Relation ID eists but no related unit
- if not missing_data:
- # Edge case - relation ID exists but departings
- _hook_name = hook_name()
- if (('departed' in _hook_name or 'broken' in _hook_name) and
- related_interface in _hook_name):
- state = 'blocked'
- missing_relations.add(generic_interface)
- juju_log("{} relation's interface, {}, "
- "relationship is departed or broken "
- "and is required for functionality."
- "".format(generic_interface, related_interface),
- "WARN")
- # Normal case relation ID exists but no related unit
- # (joining)
- else:
- juju_log("{} relations's interface, {}, is related but has"
- " no units in the relation."
- "".format(generic_interface, related_interface),
- "INFO")
- # Related unit exists and data missing on the relation
- else:
- juju_log("{} relation's interface, {}, is related awaiting "
- "the following data from the relationship: {}. "
- "".format(generic_interface, related_interface,
- ", ".join(missing_data)), "INFO")
- if state != 'blocked':
- state = 'waiting'
- if generic_interface not in missing_relations:
- incomplete_relations.add(generic_interface)
-
- if missing_relations:
- message = "Missing relations: {}".format(", ".join(missing_relations))
- if incomplete_relations:
- message += "; incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'blocked'
- elif incomplete_relations:
- message = "Incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'waiting'
-
- return state, message
-
-
-def _ows_check_charm_func(state, message, charm_func_with_configs):
- """Run a custom check function for the charm to see if it wants to
- change the state. This is only run if not in 'maintenance' and
- tests to see if the new state is more important that the previous
- one determined by the interfaces/relations check.
-
- @param state: the previously determined state so far.
- @param message: the user orientated message so far.
- @param charm_func: a callable function that returns state, message
- @returns state, message strings.
- """
- if charm_func_with_configs:
- charm_state, charm_message = charm_func_with_configs()
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- charm_message = charm_message.replace("Incomplete relations: ",
- "")
- message = "{}, {}".format(message, charm_message)
- else:
- message = charm_message
- return state, message
-
-
-def _ows_check_services_running(services, ports):
- """Check that the services that should be running are actually running
- and that any ports specified are being listened to.
-
- @param services: list of strings OR dictionary specifying services/ports
- @param ports: list of ports
- @returns state, message: strings or None, None
- """
- messages = []
- state = None
- if services is not None:
- services = _extract_services_list_helper(services)
- services_running, running = _check_running_services(services)
- if not all(running):
- messages.append(
- "Services not running that should be: {}"
- .format(", ".join(_filter_tuples(services_running, False))))
- state = 'blocked'
- # also verify that the ports that should be open are open
- # NB, that ServiceManager objects only OPTIONALLY have ports
- map_not_open, ports_open = (
- _check_listening_on_services_ports(services))
- if not all(ports_open):
- # find which service has missing ports. They are in service
- # order which makes it a bit easier.
- message_parts = {service: ", ".join([str(v) for v in open_ports])
- for service, open_ports in map_not_open.items()}
- message = ", ".join(
- ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
- messages.append(
- "Services with ports not open that should be: {}"
- .format(message))
- state = 'blocked'
-
- if ports is not None:
- # and we can also check ports which we don't know the service for
- ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
- if not all(ports_open_bools):
- messages.append(
- "Ports which should be open, but are not: {}"
- .format(", ".join([str(p) for p, v in ports_open
- if not v])))
- state = 'blocked'
-
- if state is not None:
- message = "; ".join(messages)
- return state, message
-
- return None, None
-
-
-def _extract_services_list_helper(services):
- """Extract a OrderedDict of {service: [ports]} of the supplied services
- for use by the other functions.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param services: see above
- @returns OrderedDict(service: [ports], ...)
- """
- if services is None:
- return {}
- if isinstance(services, dict):
- services = services.values()
- # either extract the list of services from the dictionary, or if
- # it is a simple string, use that. i.e. works with mixed lists.
- _s = OrderedDict()
- for s in services:
- if isinstance(s, dict) and 'service' in s:
- _s[s['service']] = s.get('ports', [])
- if isinstance(s, str):
- _s[s] = []
- return _s
-
-
-def _check_running_services(services):
- """Check that the services dict provided is actually running and provide
- a list of (service, boolean) tuples for each service.
-
- Returns both a zipped list of (service, boolean) and a list of booleans
- in the same order as the services.
-
- @param services: OrderedDict of strings: [ports], one for each service to
- check.
- @returns [(service, boolean), ...], : results for checks
- [boolean] : just the result of the service checks
- """
- services_running = [service_running(s) for s in services]
- return list(zip(services, services_running)), services_running
-
-
-def _check_listening_on_services_ports(services, test=False):
- """Check that the unit is actually listening (has the port open) on the
- ports that the service specifies are open. If test is True then the
- function returns the services with ports that are open rather than
- closed.
-
- Returns an OrderedDict of service: ports and a list of booleans
-
- @param services: OrderedDict(service: [port, ...], ...)
- @param test: default=False, if False, test for closed, otherwise open.
- @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
- """
- test = not(not(test)) # ensure test is True or False
- all_ports = list(itertools.chain(*services.values()))
- ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
- map_ports = OrderedDict()
- matched_ports = [p for p, opened in zip(all_ports, ports_states)
- if opened == test] # essentially opened xor test
- for service, ports in services.items():
- set_ports = set(ports).intersection(matched_ports)
- if set_ports:
- map_ports[service] = set_ports
- return map_ports, ports_states
-
-
-def _check_listening_on_ports_list(ports):
- """Check that the ports list given are being listened to
-
- Returns a list of ports being listened to and a list of the
- booleans.
-
- @param ports: LIST or port numbers.
- @returns [(port_num, boolean), ...], [boolean]
- """
- ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
- return zip(ports, ports_open), ports_open
-
-
-def _filter_tuples(services_states, state):
- """Return a simple list from a list of tuples according to the condition
-
- @param services_states: LIST of (string, boolean): service and running
- state.
- @param state: Boolean to match the tuple against.
- @returns [LIST of strings] that matched the tuple RHS.
- """
- return [s for s, b in services_states if b == state]
-
-
-def workload_state_compare(current_workload_state, workload_state):
- """ Return highest priority of two states"""
- hierarchy = {'unknown': -1,
- 'active': 0,
- 'maintenance': 1,
- 'waiting': 2,
- 'blocked': 3,
- }
-
- if hierarchy.get(workload_state) is None:
- workload_state = 'unknown'
- if hierarchy.get(current_workload_state) is None:
- current_workload_state = 'unknown'
-
- # Set workload_state based on hierarchy of statuses
- if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
- return current_workload_state
- else:
- return workload_state
-
-
-def incomplete_relation_data(configs, required_interfaces):
- """Check complete contexts against required_interfaces
- Return dictionary of incomplete relation data.
-
- configs is an OSConfigRenderer object with configs registered
-
- required_interfaces is a dictionary of required general interfaces
- with dictionary values of possible specific interfaces.
- Example:
- required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
- The interface is said to be satisfied if anyone of the interfaces in the
- list has a complete context.
-
- Return dictionary of incomplete or missing required contexts with relation
- status of interfaces and any missing data points. Example:
- {'message':
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}},
- 'identity':
- {'identity-service': {'related': False}},
- 'database':
- {'pgsql-db': {'related': False},
- 'shared-db': {'related': True}}}
- """
- complete_ctxts = configs.complete_contexts()
- incomplete_relations = [
- svc_type
- for svc_type, interfaces in required_interfaces.items()
- if not set(interfaces).intersection(complete_ctxts)]
- return {
- i: configs.get_incomplete_context_data(required_interfaces[i])
- for i in incomplete_relations}
-
-
-def do_action_openstack_upgrade(package, upgrade_callback, configs):
- """Perform action-managed OpenStack upgrade.
-
- Upgrades packages to the configured openstack-origin version and sets
- the corresponding action status as a result.
-
- If the charm was installed from source we cannot upgrade it.
- For backwards compatibility a config flag (action-managed-upgrade) must
- be set for this code to run, otherwise a full service level upgrade will
- fire on config-changed.
-
- @param package: package name for determining if upgrade available
- @param upgrade_callback: function callback to charm's upgrade function
- @param configs: templating object derived from OSConfigRenderer class
-
- @return: True if upgrade successful; False if upgrade failed or skipped
- """
- ret = False
-
- if git_install_requested():
- action_set({'outcome': 'installed from source, skipped upgrade.'})
- else:
- if openstack_upgrade_available(package):
- if config('action-managed-upgrade'):
- juju_log('Upgrading OpenStack release')
-
- try:
- upgrade_callback(configs=configs)
- action_set({'outcome': 'success, upgrade completed.'})
- ret = True
- except:
- action_set({'outcome': 'upgrade failed, see traceback.'})
- action_set({'traceback': traceback.format_exc()})
- action_fail('do_openstack_upgrade resulted in an '
- 'unexpected error')
- else:
- action_set({'outcome': 'action-managed-upgrade config is '
- 'False, skipped upgrade.'})
- else:
- action_set({'outcome': 'no upgrade available.'})
-
- return ret
-
-
-def remote_restart(rel_name, remote_service=None):
- trigger = {
- 'restart-trigger': str(uuid.uuid4()),
- }
- if remote_service:
- trigger['remote-service'] = remote_service
- for rid in relation_ids(rel_name):
- # This subordinate can be related to two seperate services using
- # different subordinate relations so only issue the restart if
- # the principle is conencted down the relation we think it is
- if related_units(relid=rid):
- relation_set(relation_id=rid,
- relation_settings=trigger,
- )
-
-
-def check_actually_paused(services=None, ports=None):
- """Check that services listed in the services object and and ports
- are actually closed (not listened to), to verify that the unit is
- properly paused.
-
- @param services: See _extract_services_list_helper
- @returns status, : string for status (None if okay)
- message : string for problem for status_set
- """
- state = None
- message = None
- messages = []
- if services is not None:
- services = _extract_services_list_helper(services)
- services_running, services_states = _check_running_services(services)
- if any(services_states):
- # there shouldn't be any running so this is a problem
- messages.append("these services running: {}"
- .format(", ".join(
- _filter_tuples(services_running, True))))
- state = "blocked"
- ports_open, ports_open_bools = (
- _check_listening_on_services_ports(services, True))
- if any(ports_open_bools):
- message_parts = {service: ", ".join([str(v) for v in open_ports])
- for service, open_ports in ports_open.items()}
- message = ", ".join(
- ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
- messages.append(
- "these service:ports are open: {}".format(message))
- state = 'blocked'
- if ports is not None:
- ports_open, bools = _check_listening_on_ports_list(ports)
- if any(bools):
- messages.append(
- "these ports which should be closed, but are open: {}"
- .format(", ".join([str(p) for p, v in ports_open if v])))
- state = 'blocked'
- if messages:
- message = ("Services should be paused but {}"
- .format(", ".join(messages)))
- return state, message
-
-
-def set_unit_paused():
- """Set the unit to a paused state in the local kv() store.
- This does NOT actually pause the unit
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', True)
-
-
-def clear_unit_paused():
- """Clear the unit from a paused state in the local kv() store
- This does NOT actually restart any services - it only clears the
- local state.
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', False)
-
-
-def is_unit_paused_set():
- """Return the state of the kv().get('unit-paused').
- This does NOT verify that the unit really is paused.
-
- To help with units that don't have HookData() (testing)
- if it excepts, return False
- """
- try:
- with unitdata.HookData()() as t:
- kv = t[0]
- # transform something truth-y into a Boolean.
- return not(not(kv.get('unit-paused')))
- except:
- return False
-
-
-def pause_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Pause a unit by stopping the services and setting 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have stopped and ports are no longer
- being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return non None, None to indicate that the unit
- didn't pause cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are stopped, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm pausing.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- services = _extract_services_list_helper(services)
- messages = []
- if services:
- for service in services.keys():
- stopped = service_pause(service)
- if not stopped:
- messages.append("{} didn't stop cleanly.".format(service))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- message.append(str(e))
- set_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't pause: {}".format("; ".join(messages)))
-
-
-def resume_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Resume a unit by starting the services and clearning 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have started and ports are being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return non None to indicate that the unit
- didn't resume cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are started, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm resuming.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- services = _extract_services_list_helper(services)
- messages = []
- if services:
- for service in services.keys():
- started = service_resume(service)
- if not started:
- messages.append("{} didn't start cleanly.".format(service))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- message.append(str(e))
- clear_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't resume: {}".format("; ".join(messages)))
-
-
-def make_assess_status_func(*args, **kwargs):
- """Creates an assess_status_func() suitable for handing to pause_unit()
- and resume_unit().
-
- This uses the _determine_os_workload_status(...) function to determine
- what the workload_status should be for the unit. If the unit is
- not in maintenance or active states, then the message is returned to
- the caller. This is so an action that doesn't result in either a
- complete pause or complete resume can signal failure with an action_fail()
- """
- def _assess_status_func():
- state, message = _determine_os_workload_status(*args, **kwargs)
- status_set(state, message)
- if state not in ['maintenance', 'active']:
- return message
- return None
-
- return _assess_status_func
-
-
-def pausable_restart_on_change(restart_map, stopstart=False,
- restart_functions=None):
- """A restart_on_change decorator that checks to see if the unit is
- paused. If it is paused then the decorated function doesn't fire.
-
- This is provided as a helper, as the @restart_on_change(...) decorator
- is in core.host, yet the openstack specific helpers are in this file
- (contrib.openstack.utils). Thus, this needs to be an optional feature
- for openstack charms (or charms that wish to use the openstack
- pause/resume type features).
-
- It is used as follows:
-
- from contrib.openstack.utils import (
- pausable_restart_on_change as restart_on_change)
-
- @restart_on_change(restart_map, stopstart=<boolean>)
- def some_hook(...):
- pass
-
- see core.utils.restart_on_change() for more details.
-
- @param f: the function to decorate
- @param restart_map: the restart map {conf_file: [services]}
- @param stopstart: DEFAULT false; whether to stop, start or just restart
- @returns decorator to use a restart_on_change with pausability
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- if is_unit_paused_set():
- return f(*args, **kwargs)
- # otherwise, normal restart_on_change functionality
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1',
- 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1', 'value1',
- 'key2', 'value2,value3,value4'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1', 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a list of comma-separated values
- which themselves may be comma-separated list of values.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return yaml.safe_load(config_flags)
-
- if config_flags.find('==') >= 0:
- juju_log("config_flags is not in expected format (key=value)",
- level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = {}
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- juju_log("Invalid config value(s) at index %s" % (i),
- level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/packages.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index e29bd1b..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import subprocess
-import sys
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def pip_execute(*args, **kwargs):
- """Overriden pip_execute() to stop sys.path being changed.
-
- The act of importing main from the pip module seems to cause add wheels
- from the /usr/share/python-wheels which are installed by various tools.
- This function ensures that sys.path remains the same after the call is
- executed.
- """
- try:
- _path = sys.path
- try:
- from pip import main as _pip_execute
- except ImportError:
- apt_update()
- apt_install('python-pip')
- from pip import main as _pip_execute
- _pip_execute(*args, **kwargs)
- finally:
- sys.path = _path
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, constraints=None, **options):
- """Install a requirements file.
-
- :param constraints: Path to pip constraints file.
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
- """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- if constraints:
- command.append("-c {0}".format(constraints))
- log("Installing from file: {} with constraints {} "
- "and options: {}".format(requirements, constraints, command))
- else:
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None,
- constraints=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if constraints:
- command.extend(['-c', constraints])
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- apt_install('python-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index beff270..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,1333 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import errno
-import hashlib
-import math
-import six
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- config,
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-
-from charmhelpers.core.kernel import modprobe
-from charmhelpers.contrib.openstack.utils import config_flags_parser
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-
-# The number of placement groups per OSD to target for placement group
-# calculations. This number is chosen as 100 due to the ceph PG Calc
-# documentation recommending to choose 100 for clusters which are not
-# expected to increase in the foreseeable future. Since the majority of the
-# calculations are done on deployment, target the case of non-expanding
-# clusters as the default.
-DEFAULT_PGS_PER_OSD_TARGET = 100
-DEFAULT_POOL_WEIGHT = 10.0
-LEGACY_PG_COUNT = 200
-
-
-def validator(value, valid_type, valid_range=None):
- """
- Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
- Example input:
- validator(value=1,
- valid_type=int,
- valid_range=[0, 2])
- This says I'm testing value=1. It must be an int inclusive in [0,2]
-
- :param value: The value to validate
- :param valid_type: The type that value should be.
- :param valid_range: A range of values that value can assume.
- :return:
- """
- assert isinstance(value, valid_type), "{} is not a {}".format(
- value,
- valid_type)
- if valid_range is not None:
- assert isinstance(valid_range, list), \
- "valid_range must be a list, was given {}".format(valid_range)
- # If we're dealing with strings
- if valid_type is six.string_types:
- assert value in valid_range, \
- "{} is not in the list {}".format(value, valid_range)
- # Integer, float should have a min and max
- else:
- if len(valid_range) != 2:
- raise ValueError(
- "Invalid valid_range list of {} for {}. "
- "List must be [min,max]".format(valid_range, value))
- assert value >= valid_range[0], \
- "{} is less than minimum allowed value of {}".format(
- value, valid_range[0])
- assert value <= valid_range[1], \
- "{} is greater than maximum allowed value of {}".format(
- value, valid_range[1])
-
-
-class PoolCreationError(Exception):
- """
- A custom error to inform the caller that a pool creation failed. Provides an error message
- """
-
- def __init__(self, message):
- super(PoolCreationError, self).__init__(message)
-
-
-class Pool(object):
- """
- An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
- Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
- """
-
- def __init__(self, service, name):
- self.service = service
- self.name = name
-
- # Create the pool if it doesn't exist already
- # To be implemented by subclasses
- def create(self):
- pass
-
- def add_cache_tier(self, cache_pool, mode):
- """
- Adds a new cache tier to an existing pool.
- :param cache_pool: six.string_types. The cache tier pool name to add.
- :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
- :return: None
- """
- # Check the input types and values
- validator(value=cache_pool, valid_type=six.string_types)
- validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
-
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
-
- def remove_cache_tier(self, cache_pool):
- """
- Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
- :param cache_pool: six.string_types. The cache tier pool name to remove.
- :return: None
- """
- # read-only is easy, writeback is much harder
- mode = get_cache_mode(self.service, cache_pool)
- version = ceph_version()
- if mode == 'readonly':
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- elif mode == 'writeback':
- pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
- 'cache-mode', cache_pool, 'forward']
- if version >= '10.1':
- # Jewel added a mandatory flag
- pool_forward_cmd.append('--yes-i-really-mean-it')
-
- check_call(pool_forward_cmd)
- # Flush the cache and wait for it to return
- check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
- """Return the number of placement groups to use when creating the pool.
-
- Returns the number of placement groups which should be specified when
- creating the pool. This is based upon the calculation guidelines
- provided by the Ceph Placement Group Calculator (located online at
- http://ceph.com/pgcalc/).
-
- The number of placement groups are calculated using the following:
-
- (Target PGs per OSD) * (OSD #) * (%Data)
- ----------------------------------------
- (Pool size)
-
- Per the upstream guidelines, the OSD # should really be considered
- based on the number of OSDs which are eligible to be selected by the
- pool. Since the pool creation doesn't specify any of CRUSH set rules,
- the default rule will be dependent upon the type of pool being
- created (replicated or erasure).
-
- This code makes no attempt to determine the number of OSDs which can be
- selected for the specific rule, rather it is left to the user to tune
- in the form of 'expected-osd-count' config option.
-
- :param pool_size: int. pool_size is either the number of replicas for
- replicated pools or the K+M sum for erasure coded pools
- :param percent_data: float. the percentage of data that is expected to
- be contained in the pool for the specific OSD set. Default value
- is to assume 10% of the data is for this pool, which is a
- relatively low % of the data but allows for the pg_num to be
- increased. NOTE: the default is primarily to handle the scenario
- where related charms requiring pools has not been upgraded to
- include an update to indicate their relative usage of the pools.
- :return: int. The number of pgs to use.
- """
-
- # Note: This calculation follows the approach that is provided
- # by the Ceph PG Calculator located at http://ceph.com/pgcalc/.
- validator(value=pool_size, valid_type=int)
-
- # Ensure that percent data is set to something - even with a default
- # it can be set to None, which would wreak havoc below.
- if percent_data is None:
- percent_data = DEFAULT_POOL_WEIGHT
-
- # If the expected-osd-count is specified, then use the max between
- # the expected-osd-count and the actual osd_count
- osd_list = get_osds(self.service)
- expected = config('expected-osd-count') or 0
-
- if osd_list:
- osd_count = max(expected, len(osd_list))
-
- # Log a message to provide some insight if the calculations claim
- # to be off because someone is setting the expected count and
- # there are more OSDs in reality. Try to make a proper guess
- # based upon the cluster itself.
- if expected and osd_count != expected:
- log("Found more OSDs than provided expected count. "
- "Using the actual count instead", INFO)
- elif expected:
- # Use the expected-osd-count in older ceph versions to allow for
- # a more accurate pg calculations
- osd_count = expected
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- return LEGACY_PG_COUNT
-
- percent_data /= 100.0
- target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
- num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size
-
- # The CRUSH algorithm has a slight optimization for placement groups
- # with powers of 2 so find the nearest power of 2. If the nearest
- # power of 2 is more than 25% below the original value, the next
- # highest value is used. To do this, find the nearest power of 2 such
- # that 2^n <= num_pg, check to see if its within the 25% tolerance.
- exponent = math.floor(math.log(num_pg, 2))
- nearest = 2 ** exponent
- if (num_pg - nearest) > (num_pg * 0.25):
- # Choose the next highest power of 2 since the nearest is more
- # than 25% below the original value.
- return int(nearest * 2)
- else:
- return int(nearest)
-
-
-class ReplicatedPool(Pool):
- def __init__(self, service, name, pg_num=None, replicas=2,
- percent_data=10.0):
- super(ReplicatedPool, self).__init__(service=service, name=name)
- self.replicas = replicas
- if pg_num:
- # Since the number of placement groups were specified, ensure
- # that there aren't too many created.
- max_pgs = self.get_pgs(self.replicas, 100.0)
- self.pg_num = min(pg_num, max_pgs)
- else:
- self.pg_num = self.get_pgs(self.replicas, percent_data)
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(self.pg_num)]
- try:
- check_call(cmd)
- # Set the pool replica size
- update_pool(client=self.service,
- pool=self.name,
- settings={'size': str(self.replicas)})
- except CalledProcessError:
- raise
-
-
-# Default jerasure erasure coded pool
-class ErasurePool(Pool):
- def __init__(self, service, name, erasure_code_profile="default",
- percent_data=10.0):
- super(ErasurePool, self).__init__(service=service, name=name)
- self.erasure_code_profile = erasure_code_profile
- self.percent_data = percent_data
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Try to find the erasure profile information in order to properly
- # size the number of placement groups. The size of an erasure
- # coded placement group is calculated as k+m.
- erasure_profile = get_erasure_profile(self.service,
- self.erasure_code_profile)
-
- # Check for errors
- if erasure_profile is None:
- msg = ("Failed to discover erasure profile named "
- "{}".format(self.erasure_code_profile))
- log(msg, level=ERROR)
- raise PoolCreationError(msg)
- if 'k' not in erasure_profile or 'm' not in erasure_profile:
- # Error
- msg = ("Unable to find k (data chunks) or m (coding chunks) "
- "in erasure profile {}".format(erasure_profile))
- log(msg, level=ERROR)
- raise PoolCreationError(msg)
-
- k = int(erasure_profile['k'])
- m = int(erasure_profile['m'])
- pgs = self.get_pgs(k + m, self.percent_data)
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(pgs), str(pgs),
- 'erasure', self.erasure_code_profile]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
- """Get an existing erasure code profile if it already exists.
- Returns json formatted output"""
-
-
-def get_mon_map(service):
- """
- Returns the current monitor map.
- :param service: six.string_types. The Ceph user name to run the command under
- :return: json string. :raise: ValueError if the monmap fails to parse.
- Also raises CalledProcessError if our ceph command fails
- """
- try:
- mon_status = check_output(
- ['ceph', '--id', service,
- 'mon_status', '--format=json'])
- try:
- return json.loads(mon_status)
- except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}".format(
- mon_status, v.message))
- raise
- except CalledProcessError as e:
- log("mon_status command failed with message: {}".format(
- e.message))
- raise
-
-
-def hash_monitor_names(service):
- """
- Uses the get_mon_map() function to get information about the monitor
- cluster.
- Hash the name of each monitor. Return a sorted list of monitor hashes
- in an ascending order.
- :param service: six.string_types. The Ceph user name to run the command under
- :rtype : dict. json dict of monitor name, ip address and rank
- example: {
- 'name': 'ip-172-31-13-165',
- 'rank': 0,
- 'addr': '172.31.13.165:6789/0'}
- """
- try:
- hash_list = []
- monitor_list = get_mon_map(service=service)
- if monitor_list['monmap']['mons']:
- for mon in monitor_list['monmap']['mons']:
- hash_list.append(
- hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
- return sorted(hash_list)
- else:
- return None
- except (ValueError, CalledProcessError):
- raise
-
-
-def monitor_key_delete(service, key):
- """
- Delete a key and value pair from the monitor cluster
- :param service: six.string_types. The Ceph user name to run the command under
- Deletes a key value pair on the monitor cluster.
- :param key: six.string_types. The key to delete.
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'del', str(key)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_set(service, key, value):
- """
- Sets a key value pair on the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to set.
- :param value: The value to set. This will be converted to a string
- before setting
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'put', str(key), str(value)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_get(service, key):
- """
- Gets the value of an existing key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for.
- :return: Returns the value of that key or None if not found.
- """
- try:
- output = check_output(
- ['ceph', '--id', service,
- 'config-key', 'get', str(key)])
- return output
- except CalledProcessError as e:
- log("Monitor config-key get failed with message: {}".format(
- e.output))
- return None
-
-
-def monitor_key_exists(service, key):
- """
- Searches for the existence of a key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for
- :return: Returns True if the key exists, False if not and raises an
- exception if an unknown error occurs. :raise: CalledProcessError if
- an unknown error occurs
- """
- try:
- check_call(
- ['ceph', '--id', service,
- 'config-key', 'exists', str(key)])
- # I can return true here regardless because Ceph returns
- # ENOENT if the key wasn't found
- return True
- except CalledProcessError as e:
- if e.returncode == errno.ENOENT:
- return False
- else:
- log("Unknown error from ceph config-get exists: {} {}".format(
- e.returncode, e.output))
- raise
-
-
-def get_erasure_profile(service, name):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param name:
- :return:
- """
- try:
- out = check_output(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name, '--format=json'])
- return json.loads(out)
- except (CalledProcessError, OSError, ValueError):
- return None
-
-
-def pool_set(service, pool_name, key, value):
- """
- Sets a value for a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param key: six.string_types
- :param value:
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def snapshot_pool(service, pool_name, snapshot_name):
- """
- Snapshots a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_pool_snapshot(service, pool_name, snapshot_name):
- """
- Remove a snapshot from a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param max_bytes: int or long
- :return: None. Can raise CalledProcessError
- """
- # Set a byte quota on a RADOS pool in ceph.
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
- 'max_bytes', str(max_bytes)]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_pool_quota(service, pool_name):
- """
- Set a byte quota on a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_erasure_profile(service, profile_name):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
- profile_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
- failure_domain='host',
- data_chunks=2, coding_chunks=1,
- locality=None, durability_estimator=None):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :param erasure_plugin_name: six.string_types
- :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
- 'room', 'root', 'row'])
- :param data_chunks: int
- :param coding_chunks: int
- :param locality: int
- :param durability_estimator: int
- :return: None. Can raise CalledProcessError
- """
- # Ensure this failure_domain is allowed by Ceph
- validator(failure_domain, six.string_types,
- ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
-
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
- 'ruleset_failure_domain=' + failure_domain]
- if locality is not None and durability_estimator is not None:
- raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
-
- # Add plugin specific information
- if locality is not None:
- # For local erasure codes
- cmd.append('l=' + str(locality))
- if durability_estimator is not None:
- # For Shec erasure codes
- cmd.append('c=' + str(durability_estimator))
-
- if erasure_profile_exists(service, profile_name):
- cmd.append('--force')
-
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def rename_pool(service, old_name, new_name):
- """
- Rename a Ceph pool from old_name to new_name
- :param service: six.string_types. The Ceph user name to run the command under
- :param old_name: six.string_types
- :param new_name: six.string_types
- :return: None
- """
- validator(value=old_name, valid_type=six.string_types)
- validator(value=new_name, valid_type=six.string_types)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
- check_call(cmd)
-
-
-def erasure_profile_exists(service, name):
- """
- Check to see if an Erasure code profile already exists.
- :param service: six.string_types. The Ceph user name to run the command under
- :param name: six.string_types
- :return: int or None
- """
- validator(value=name, valid_type=six.string_types)
- try:
- check_call(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name])
- return True
- except CalledProcessError:
- return False
-
-
-def get_cache_mode(service, pool_name):
- """
- Find the current caching mode of the pool_name given.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: int or None
- """
- validator(value=service, valid_type=six.string_types)
- validator(value=pool_name, valid_type=six.string_types)
- out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
- try:
- osd_json = json.loads(out)
- for pool in osd_json['pools']:
- if pool['pool_name'] == pool_name:
- return pool['cache_mode']
- return None
- except ValueError:
- raise
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out.split()
-
-
-def get_osds(service):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
- """
- version = ceph_version()
- if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
-
- return None
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def update_pool(client, pool, settings):
- cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
- for k, v in six.iteritems(settings):
- cmd.append(k)
- cmd.append(v)
-
- check_call(cmd)
-
-
-def create_pool(service, name, replicas=3, pg_num=None):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- if not pg_num:
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pg_num = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pg_num = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
- check_call(cmd)
-
- update_pool(service, name, settings={'size': str(replicas)})
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted ring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes(relation='ceph'):
- """Query named relation to determine current nodes."""
- hosts = []
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- create_keyring(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = os.errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None,
                        replicas=3):
    """NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.

    :param system_services: optional list of service names to stop before
        and start after the data migration (default: none).
    """
    # Use None rather than a mutable [] default: a shared default list could
    # be mutated by one call and leak state into subsequent calls.
    if system_services is None:
        system_services = []

    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('Creating new pool {}.'.format(pool), level=INFO)
        create_pool(service, pool, replicas=replicas)

    if not rbd_exists(service, pool, rbd_img):
        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
            level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy! Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('Stopping services {} prior to migrating data.'
                    .format(svc), level=DEBUG)
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('Starting service {} after migrating data.'
                .format(svc), level=DEBUG)
            service_start(svc)
-
-
def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    Returns False if no ceph key is available in relation state.
    """
    key = None
    for rid in relation_ids(relation):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break
        if key:
            # Stop scanning further relations once a key has been found.
            break

    if not key:
        return False

    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        # Use the POSIX ':' owner/group separator; the historical '.' form
        # is a deprecated GNU extension and is ambiguous for user names
        # that contain a dot.
        check_call(['chown', '%s:%s' % (user, group), keyring])

    return True
-
-
def ceph_version():
    """Return the locally installed ceph version string, or None.

    Parses 'ceph -v' output (e.g. 'ceph version 0.94.1 (...)') and
    returns the third whitespace-separated token. Returns None when the
    ceph binary is absent or the output is not in the expected form.
    """
    if not os.path.exists('/usr/bin/ceph'):
        return None

    words = check_output(['ceph', '-v']).decode('US-ASCII').split()
    if len(words) > 3:
        return words[2]
    return None
-
-
class CephBrokerRq(object):
    """Ceph broker request.

    Multiple operations can be added to a request and sent to the Ceph broker
    to be executed.

    Request is json-encoded for sending over the wire.

    The API is versioned and defaults to version 1.
    """

    def __init__(self, api_version=1, request_id=None):
        self.api_version = api_version
        # Generate a unique id unless the caller supplied one (e.g. when
        # rebuilding a request from relation data).
        self.request_id = request_id if request_id else str(uuid.uuid1())
        self.ops = []

    def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                           weight=None):
        """Adds an operation to create a pool.

        @param pg_num setting: optional setting. If not provided, this value
        will be calculated by the broker based on how many OSDs are in the
        cluster at the time of creation. Note that, if provided, this value
        will be capped at the current available maximum.
        @param weight: the percentage of data the pool makes up
        """
        if pg_num and weight:
            raise ValueError('pg_num and weight are mutually exclusive')

        op = {'op': 'create-pool', 'name': name, 'replicas': replica_count,
              'pg_num': pg_num, 'weight': weight}
        self.ops.append(op)

    def set_ops(self, ops):
        """Set request ops to provided value.

        Useful for injecting ops that come from a previous request
        to allow comparisons to ensure validity.
        """
        self.ops = ops

    @property
    def request(self):
        """JSON-encoded form of this request, as sent over the wire."""
        payload = {'api-version': self.api_version, 'ops': self.ops,
                   'request-id': self.request_id}
        return json.dumps(payload)

    def _ops_equal(self, other):
        # Two op lists are equivalent when every op agrees on the keys
        # that define its behavior; request ids are deliberately ignored.
        if len(self.ops) != len(other.ops):
            return False
        relevant = ('replicas', 'name', 'op', 'pg_num', 'weight')
        for mine, theirs in zip(self.ops, other.ops):
            for key in relevant:
                if mine.get(key) != theirs.get(key):
                    return False
        return True

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.api_version == other.api_version and
                self._ops_equal(other))

    def __ne__(self, other):
        return not self.__eq__(other)
-
-
class CephBrokerRsp(object):
    """Ceph broker response.

    Response is json-decoded and contents provided as methods/properties.

    The API is versioned and defaults to version 1.
    """

    def __init__(self, encoded_rsp):
        # api_version is currently unset; responses are not versioned
        # on the wire.
        self.api_version = None
        self.rsp = json.loads(encoded_rsp)

    @property
    def request_id(self):
        """Id of the CephBrokerRq this response corresponds to, if any."""
        return self.rsp.get('request-id')

    @property
    def exit_code(self):
        """Exit code reported by the broker (0 indicates success)."""
        return self.rsp.get('exit-code')

    @property
    def exit_msg(self):
        """stderr output captured from the broker, if any."""
        return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identify which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <Request complete actions>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
def get_previous_request(rid):
    """Return the last ceph broker request sent on a given relation.

    @param rid: Relation id to query for request
    @returns: CephBrokerRq rebuilt from relation data, or None when no
        request has been sent on this relation.
    """
    broker_req = relation_get(attribute='broker_req', rid=rid,
                              unit=local_unit())
    if not broker_req:
        return None

    data = json.loads(broker_req)
    request = CephBrokerRq(api_version=data['api-version'],
                           request_id=data['request-id'])
    request.set_ops(data['ops'])
    return request
-
-
def get_request_states(request, relation='ceph'):
    """Return a dict of requests per relation id with their corresponding
    completion state.

    This allows a charm, which has a request for ceph, to see whether there is
    an equivalent request already being processed and if so what state that
    request is in.

    @param request: A CephBrokerRq object
    @param relation: Name of the relation to inspect (default 'ceph')
    @returns: dict mapping relation id -> {'sent': bool, 'complete': bool}
    """
    # NOTE: the original code initialised a stray 'complete = []' that was
    # never used; it has been removed.
    requests = {}
    for rid in relation_ids(relation):
        previous_request = get_previous_request(rid)
        if request == previous_request:
            sent = True
            complete = is_request_complete_for_rid(previous_request, rid)
        else:
            sent = False
            complete = False

        requests[rid] = {
            'sent': sent,
            'complete': complete,
        }

    return requests
-
-
def is_request_sent(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been sent.

    Returns True if a similar request has been sent on every relation.

    @param request: A CephBrokerRq object
    """
    states = get_request_states(request, relation=relation)
    return all(state['sent'] for state in states.values())
-
-
def is_request_complete(request, relation='ceph'):
    """Check to see if a functionally equivalent request has already been
    completed.

    Returns True if a similar request has been completed on every relation.

    @param request: A CephBrokerRq object
    """
    states = get_request_states(request, relation=relation)
    return all(state['complete'] for state in states.values())
-
-
def is_request_complete_for_rid(request, rid):
    """Check if a given request has been completed on the given relation

    Prefers a reply targeted at this unit (see get_broker_rsp_key); falls
    back to the legacy shared 'broker_rsp' key when the remote side has not
    provided a unit-specific reply.

    @param request: A CephBrokerRq object
    @param rid: Relation ID
    """
    broker_key = get_broker_rsp_key()
    for unit in related_units(rid):
        rdata = relation_get(rid=rid, unit=unit)
        if rdata.get(broker_key):
            # Unit-targeted reply: it only counts as completion when it
            # answers *our* request id.
            rsp = CephBrokerRsp(rdata.get(broker_key))
            if rsp.request_id == request.request_id:
                # A falsy exit code (0 or absent) is treated as success.
                if not rsp.exit_code:
                    return True
        else:
            # The remote unit sent no reply targeted at this unit so either the
            # remote ceph cluster does not support unit targeted replies or it
            # has not processed our request yet.
            if rdata.get('broker_rsp'):
                request_data = json.loads(rdata['broker_rsp'])
                if request_data.get('request-id'):
                    # A request-id in the legacy key means the remote side
                    # also writes unit-specific keys; ours just isn't there
                    # yet, so don't treat someone else's reply as ours.
                    log('Ignoring legacy broker_rsp without unit key as remote '
                        'service supports unit specific replies', level=DEBUG)
                else:
                    log('Using legacy broker_rsp as remote service does not '
                        'supports unit specific replies', level=DEBUG)
                    # NOTE(review): legacy replies carry no request id, so a
                    # successful exit code is assumed to belong to our request.
                    rsp = CephBrokerRsp(rdata['broker_rsp'])
                    if not rsp.exit_code:
                        return True

    return False
-
-
def get_broker_rsp_key():
    """Return broker response key for this unit.

    This is the key that ceph is going to use to pass request status
    information back to this unit (e.g. 'broker-rsp-glance-0' for unit
    glance/0).
    """
    return 'broker-rsp-{}'.format(local_unit().replace('/', '-'))
-
-
def send_request_if_needed(request, relation='ceph'):
    """Send broker request if an equivalent request has not already been sent.

    @param request: A CephBrokerRq object
    """
    if is_request_sent(request, relation=relation):
        log('Request already sent but not complete, not sending new request',
            level=DEBUG)
        return

    for rid in relation_ids(relation):
        log('Sending request {}'.format(request.request_id), level=DEBUG)
        relation_set(relation_id=rid, broker_req=request.request)
-
-
class CephConfContext(object):
    """Ceph config (ceph.conf) context.

    Supports user-provided Ceph configuration settings. Users can provide a
    dictionary as the value for the config-flags charm option containing
    Ceph configuration settings keyed by their section in ceph.conf.
    """
    def __init__(self, permitted_sections=None):
        # Sections a user may configure; an empty list means "allow all".
        self.permitted_sections = permitted_sections or []

    def __call__(self):
        """Return the permitted subset of the config-flags option as a dict."""
        conf = config('config-flags')
        if not conf:
            return {}

        conf = config_flags_parser(conf)
        # isinstance rather than an exact type comparison so dict
        # subclasses (e.g. OrderedDict) are accepted too.
        if not isinstance(conf, dict):
            log("Provided config-flags is not a dictionary - ignoring",
                level=WARNING)
            return {}

        permitted = self.permitted_sections
        if permitted:
            diff = set(conf.keys()).difference(set(permitted))
            if diff:
                log("Config-flags contains invalid keys '%s' - they will be "
                    "ignored" % (', '.join(diff)), level=WARNING)

        ceph_conf = {}
        for key in conf:
            if permitted and key not in permitted:
                log("Ignoring key '%s'" % key, level=WARNING)
                continue

            ceph_conf[key] = conf[key]

        return ceph_conf
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index 1d6ae6f..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

        /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    # Decode so the line handling below works on str under Python 3 as
    # well; check_output returns bytes there.
    output = check_output(cmd).decode('utf-8')
    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
    for dev, _, f in devs:
        # Raw string so '\(' is a literal escaped paren for the regex
        # rather than an (invalid) string escape; the backing file is the
        # path inside the parentheses.
        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
    return loopbacks
-
-
def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    # losetup --find does not print the device it chose, so look it up.
    for device, backing_file in six.iteritems(loopback_devices()):
        if backing_file == file_path:
            return device
-
-
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If no loopback device is currently mapped to the file, a new one will
    be created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for device, backing_file in six.iteritems(loopback_devices()):
        if backing_file == path:
            return device

    if not os.path.exists(path):
        # Create a sparse backing file of the requested size.
        check_call(['truncate', '--size', size, path])

    return create_loopback(path)
-
-
def is_mapped_loopback_device(device):
    """
    Check whether a given device name is an existing/mapped loopback device.

    :param device: str: Full path to the device (eg, /dev/loop1).
    :returns: str: Path to the backing file if it is a loopback device,
        empty string otherwise
    """
    backing_file = loopback_devices().get(device, "")
    return backing_file
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index 4719f53..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    volume_group = list_lvm_volume_group(block_device)
    if volume_group:
        check_call(['vgchange', '-an', volume_group])
-
-
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    try:
        # pvdisplay exits non-zero for devices that are not PVs.
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        return False
    return True
-
-
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    # Answer pvremove's confirmation prompt. The pipe is binary, so pass
    # bytes: a str here raises TypeError under Python 3 (and b'y\n' is
    # identical to 'y\n' under Python 2).
    p.communicate(input=b'y\n')
-
-
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    vg = None
    for raw in check_output(['pvdisplay', block_device]).splitlines():
        line = raw.decode('UTF-8').strip()
        if line.startswith('VG Name'):
            # e.g. '  VG Name               vg0' -> 'vg0'
            vg = ' '.join(line.split()[2:])
    return vg
-
-
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    cmd = ['pvcreate', block_device]
    check_call(cmd)
-
-
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :block_device: str: Full path of PV-initialized block device.
    '''
    cmd = ['vgcreate', volume_group, block_device]
    check_call(cmd)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index 3dc0df6..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- check_call,
- check_output,
- call
-)
-
-
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    if not os.path.exists(path):
        return False
    mode = os.stat(path).st_mode
    return S_ISBLK(mode)
-
-
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    WARNING: destructive -- irreversibly wipes partition metadata on
    block_device.

    :param block_device: str: Full path of block device to clean.
    '''
    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
    call(['sgdisk', '--zap-all', '--', block_device])
    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
    # Belt-and-braces: zero the first 1MB (MBR/primary GPT area) and the
    # last 100 sectors (backup GPT area) in case sgdisk left stale data.
    dev_end = check_output(['blockdev', '--getsz',
                            block_device]).decode('UTF-8')
    gpt_end = int(dev_end.split()[0]) - 100
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=1M', 'count=1'])
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
def is_device_mounted(device):
    '''Given a device path, return True if that device is mounted, and False
    if it isn't.

    :param device: str: Full path of the device to check.
    :returns: boolean: True if the path represents a mounted device, False if
        it doesn't.
    '''
    try:
        out = check_output(['lsblk', '-P', device]).decode('UTF-8')
    except Exception:
        # lsblk exits non-zero for unknown devices (and may be missing
        # entirely); treat any failure as "not mounted". Narrowed from a
        # bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt.
        return False
    return bool(re.search(r'MOUNTPOINT=".+"', out))
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/decorators.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index 6ad41ee..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raise the exception.

    :param num_retries: number of additional attempts after the first failure.
    :param base_delay: seconds to wait before the first retry; the wait grows
        linearly (base_delay * attempt number) on subsequent retries.
    :param exc_type: exception class (or tuple) that triggers a retry; any
        other exception propagates immediately.
    """
    # Local import keeps this change self-contained within the decorator.
    from functools import wraps

    def _retry_on_exception_inner_1(f):
        # functools.wraps preserves f's __name__/__doc__ on the wrapper so
        # logging and introspection see the real function.
        @wraps(f)
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                    delay = base_delay * multiplier
                    multiplier += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, retries, delay), level=INFO)
                    retries -= 1
                    if delay:
                        time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/files.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b7..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
def sed(filename, before, after, flags='g'):
    """
    Search and replaces the given pattern on filename.

    :param filename: relative or absolute file path.
    :param before: expression to be replaced (see 'man sed')
    :param after: expression to replace with (see 'man sed')
    :param flags: sed-compatible regex flags in example, to make
        the search and replace case insensitive, specify ``flags="i"``.
        The ``g`` flag is always specified regardless, so you do not
        need to remember to include it when overriding this parameter.
    :returns: If the sed command exit code was zero then return,
        otherwise raise CalledProcessError.
    """
    expression = r's/{0}/{1}/{2}'.format(before, after, flags)
    cmd = ["sed", "-i", "-r", "-e", expression,
           os.path.expanduser(filename)]
    return subprocess.check_call(cmd)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/fstab.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa915..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
class Fstab(io.FileIO):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            # d is the dump field and p the fsck pass-number field of the
            # fstab line; both default to 0 when absent.
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            if not options:
                options = "defaults"

            self.options = options
            self.d = int(d)
            self.p = int(p)

        def __eq__(self, o):
            # Entries are equal when their rendered fstab lines match.
            return str(self) == str(o)

        def __str__(self):
            # Render as a whitespace-separated fstab line.
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Path of the system fstab ('/etc/fstab').
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        """Open the fstab at *path* (DEFAULT_PATH when None) for binary
        read/write."""
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        super(Fstab, self).__init__(self._path, 'rb+')

    def _hydrate_entry(self, line):
        """Build an Entry from a raw fstab line."""
        # NOTE: use split with no arguments to split on any
        #       whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for every non-blank, non-comment line."""
        self.seek(0)
        for line in self.readlines():
            line = line.decode('us-ascii')
            try:
                if line.strip() and not line.strip().startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Skip malformed lines rather than aborting iteration.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose attribute *attr* equals *value*,
        or None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append *entry* to the file.

        Returns the entry on success, or False when an entry for the same
        device already exists.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write((str(entry) + '\n').encode('us-ascii'))
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching *entry*; return True if one was removed,
        False otherwise."""
        self.seek(0)

        lines = [l.decode('us-ascii') for l in self.readlines()]

        found = False
        for index, line in enumerate(lines):
            if line.strip() and not line.strip().startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the whole file without the removed line.
        self.seek(0)
        self.write(''.join(lines).encode('us-ascii'))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at *mountpoint*; return True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Convenience wrapper: build an Entry and add it to the file."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hookenv.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index 48b2b9d..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1007 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
def cached(func):
    """Cache return values for multiple executions of func + args

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.

    Entries live in the module-level ``cache`` dict and can be
    invalidated with :func:`flush`.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    wrapper._wrapped = func
    return wrapper
-
-
def flush(key):
    """Remove every cache entry whose key string contains *key*.

    Used to invalidate related cached results, e.g. all relation-gets
    for the local unit after relation-set changes its data.
    """
    stale = [entry for entry in cache if key in entry]
    for entry in stale:
        del cache[entry]
-
-
def log(message, level=None):
    """Write a message to the juju log.

    :param message: text to log; non-strings are converted via repr().
    :param level: optional severity (DEBUG/INFO/WARNING/ERROR/CRITICAL).
    """
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # juju-log binary not found (e.g. running outside a hook):
            # fall back to writing the message on stderr.
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
def in_relation_hook():
    """Report whether the current process is running inside a
    relation hook (Juju exports JUJU_RELATION in that case)."""
    return os.environ.get('JUJU_RELATION') is not None
-
-
def relation_type():
    """The scope (relation name) for the current relation hook, or
    None when not running in a relation hook."""
    try:
        return os.environ['JUJU_RELATION']
    except KeyError:
        return None
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
def local_unit():
    """Return this unit's ID (e.g. ``mysql/0``).

    Raises KeyError when JUJU_UNIT_NAME is unset, i.e. when not
    executed under a Juju hook context.
    """
    unit = os.environ['JUJU_UNIT_NAME']
    return unit
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
def hook_name():
    """Name of the currently executing hook.

    Prefers $JUJU_HOOK_NAME (set by Juju); falls back to the basename
    of the running script, which equals the hook name when hooks are
    dispatched via symlink.
    """
    fallback = os.path.basename(sys.argv[0])
    return os.environ.get('JUJU_HOOK_NAME', fallback)
-
-
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'

    """
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        # When True (the default) the config is saved automatically at
        # the end of a successful hook via the atexit callback below.
        self.implicit_save = True
        # Snapshot of the previous hook's config; populated by
        # load_previous() when a persisted copy exists on disk.
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()
        # This is the module-level atexit() helper defined in this
        # file, not the stdlib atexit module.
        atexit(self._implicit_save)

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)
        # Carry forward keys saved by earlier hooks that are not part
        # of the current charm config.
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v

    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.

        """
        if self._prev_dict is None:
            # No previous config at all: treat every key as changed.
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        with open(self.path, 'w') as f:
            json.dump(self, f)

    def _implicit_save(self):
        # Invoked at hook exit (registered in __init__); honours the
        # implicit_save flag.
        if self.implicit_save:
            self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
def relation_clear(r_id=None):
    '''Clears any relation data already set on relation r_id.

    Every key other than the juju-managed address keys is reset to
    None (deleting it from the relation) and written back with
    relation-set.
    '''
    settings = relation_get(rid=r_id,
                            unit=local_unit())
    # relation_get() returns None when relation-get output cannot be
    # parsed; treat that as "nothing to clear" instead of letting the
    # iteration below raise TypeError.
    if not settings:
        return
    for setting in settings:
        if setting not in ['public-address', 'private-address']:
            settings[setting] = None
    relation_set(relation_id=r_id,
                 **settings)
-
-
@cached
def relation_ids(reltype=None):
    """A list of relation_ids for *reltype*.

    :param reltype: relation type; defaults to the current hook's
        relation type.
    :returns: list of relation ids, [] when there are none.
    """
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
    # relation-ids emits JSON null when there are no relations; map
    # that to an empty list. (A dead, unreachable `return []` that
    # followed this statement has been removed.)
    return json.loads(
        subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    relation_data = []
    # NOTE(review): relation_ids() returns a *list*, but relid is used
    # below as a single relation id — presumably this default path is
    # never exercised (callers always pass relid); confirm before
    # relying on the default.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
def open_port(port, protocol="TCP"):
    """Open a service network port via the open-port hook tool."""
    subprocess.check_call(
        ['open-port', '{}/{}'.format(port, protocol)])
-
-
def close_port(port, protocol="TCP"):
    """Close a service network port via the close-port hook tool."""
    subprocess.check_call(
        ['close-port', '{}/{}'.format(port, protocol)])
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
@cached
def storage_list(storage_name=None):
    """List the storage IDs for the unit.

    :param storage_name: optionally restrict the listing to one
        storage name.
    :returns: list of storage IDs, [] when the storage-list tool does
        not exist (juju < 1.25), or None if its output is not valid
        JSON.
    """
    _args = ['storage-list', '--format=json']
    if storage_name:
        _args.append(storage_name)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except OSError as e:
        # errno is already imported at module level; the redundant
        # function-local `import errno` has been removed.
        if e.errno == errno.ENOENT:
            # storage-list does not exist
            return []
        raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, config_save=None):
        super(Hooks, self).__init__()
        # Map of hook name -> callable.
        self._hooks = {}

        # For unknown reasons, we allow the Hooks constructor to override
        # config().implicit_save.
        if config_save is not None:
            config().implicit_save = config_save

    def register(self, name, function):
        """Register a hook"""
        self._hooks[name] = function

    def execute(self, args):
        """Execute a registered hook based on args[0]"""
        _run_atstart()
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            try:
                self._hooks[hook_name]()
            except SystemExit as x:
                # A clean sys.exit(0)/sys.exit() still counts as a
                # successful hook: run the atexit callbacks before the
                # exception propagates.
                if x.code is None or x.code == 0:
                    _run_atexit()
                raise
            _run_atexit()
        else:
            raise UnregisteredHookError(hook_name)

    def hook(self, *hook_names):
        """Decorator, registering them as hooks"""
        def wrapper(decorated):
            for hook_name in hook_names:
                self.register(hook_name, decorated)
            else:
                # NOTE: there is no ``break`` in the loop above, so
                # this for/else clause ALWAYS runs: the function is
                # additionally registered under its own name and, for
                # names containing underscores, a dashed alias.
                self.register(decorated.__name__, decorated)
                if '_' in decorated.__name__:
                    self.register(
                        decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
-
-
def charm_dir():
    """Root directory of the current charm, or None when $CHARM_DIR
    is not set (i.e. outside a Juju hook context)."""
    return os.environ.get('CHARM_DIR', None)
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
def status_set(workload_state, message):
    """Set the workload state with a message

    Use status-set to set the workload state with a message which is visible
    to the user via juju status. If the status-set command is not found then
    assume this is juju < 1.23 and juju-log the message instead.

    workload_state -- valid juju workload state
        (one of maintenance/blocked/waiting/active).
    message -- status update message

    :raises ValueError: if workload_state is not a valid state.
    """
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
    if workload_state not in valid_states:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    cmd = ['status-set', workload_state, message]
    try:
        ret = subprocess.call(cmd)
        if ret == 0:
            return
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
    # status-set is unavailable (juju < 1.23) or exited non-zero:
    # record the intended status in the juju log instead.
    log_message = 'status-set failed: {} {}'.format(workload_state,
                                                    message)
    log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
def translate_exc(from_exc, to_exc):
    """Decorator factory: re-raise *from_exc* from the wrapped callable
    as *to_exc*.

    Used in this module to turn OSError from missing juju hook tools
    into NotImplementedError on older juju versions.
    """
    def decorator(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except from_exc:
                raise to_exc
        return wrapped
    return decorator
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The <class> and <id> provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The <class> and <id> provided must match a payload that has been previously
- registered with juju using payload-register. The <status> must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- <name> must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
@cached
def juju_version():
    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
    # NOTE(review): assumes at least one machine agent tools directory
    # exists; glob(...)[0] raises IndexError otherwise — confirm that
    # is acceptable for all call sites.
    jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
    return subprocess.check_output([jujud, 'version'],
                                   universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation of extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).decode('UTF-8').strip()
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/host.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/host.py
deleted file mode 100644
index 5306859..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,765 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = True
- if service_running(service_name):
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_running(service_name)
- if not started:
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name):
- """Determine whether a system service is running"""
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- if os.path.exists(_UPSTART_CONF.format(service_name)):
- try:
- output = subprocess.check_output(
- ['status', service_name],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- # This works for upstart scripts where the 'service' command
- # returns a consistent string to represent running 'start/running'
- if "start/running" in output:
- return True
- elif os.path.exists(_INIT_D_CONF.format(service_name)):
- # Check System V scripts init script return codes
- return service('status', service_name)
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
- primary_group=None, secondary_groups=None, uid=None, home_dir=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
- :param int uid: UID for user being created
- :param str home_dir: Home directory for user
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- if uid:
- user_info = pwd.getpwuid(int(uid))
- log('user with uid {0} already exists!'.format(uid))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if uid:
- cmd.extend(['--uid', str(uid)])
- if home_dir:
- cmd.extend(['--home', str(home_dir)])
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def uid_exists(uid):
- """Check if a uid exists"""
- try:
- pwd.getpwuid(uid)
- uid_exists = True
- except KeyError:
- uid_exists = False
- return uid_exists
-
-
-def group_exists(groupname):
- """Check if a group exists"""
- try:
- grp.getgrnam(groupname)
- group_exists = True
- except KeyError:
- group_exists = False
- return group_exists
-
-
-def gid_exists(gid):
- """Check if a gid exists"""
- try:
- grp.getgrgid(gid)
- gid_exists = True
- except KeyError:
- gid_exists = False
- return gid_exists
-
-
-def add_group(group_name, system_group=False, gid=None):
- """Add a group to the system
-
- Will log but otherwise succeed if the group already exists.
-
- :param str group_name: group to create
- :param bool system_group: Create system group
- :param int gid: GID for user being created
-
- :returns: The password database entry struct, as returned by `grp.getgrnam`
- """
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- if gid:
- group_info = grp.getgrgid(gid)
- log('group with gid {0} already exists!'.format(gid))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if gid:
- cmd.extend(['--gid', str(gid)])
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
-
-
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result of lambda_f()
- """
- if restart_functions is None:
- restart_functions = {}
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for service_name in services_list:
- if service_name in restart_functions:
- restart_functions[service_name](service_name)
- else:
- for action in actions:
- service(action, service_name)
- return r
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also Chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # Classic, not KiB.
- raise NotImplementedError()
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hugepage.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 54b5b5e..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/kernel.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index b166efe..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 61fd074..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/base.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index ca9dc99..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,351 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3e6e30d..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/strutils.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index dd9b971..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/sysctl.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 6e413e3..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/templating.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 7b801a3..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2 or python3-jinja2; if it is not
- installed, calling this will attempt to use charmhelpers.fetch.apt_install
- to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- if sys.version_info.major == 2:
- apt_install('python-jinja2', fatal=True)
- else:
- apt_install('python3-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/unitdata.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 54ec969..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,518 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 52eaf82..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
- # Newton
- 'newton': 'xenial-updates/newton',
- 'xenial-newton': 'xenial-updates/newton',
- 'xenial-newton/updates': 'xenial-updates/newton',
- 'xenial-updates/newton': 'xenial-updates/newton',
- 'newton/proposed': 'xenial-proposed/newton',
- 'xenial-newton/proposed': 'xenial-proposed/newton',
- 'xenial-proposed/newton': 'xenial-proposed/newton',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True, progress=None):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache(progress)
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- for handler in handlers:
- try:
- return handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- raise UnhandledSource("No handler found for source {}".format(source))
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except NotImplementedError:
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index dd24f9e..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'wb') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index b3404d8..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from subprocess import check_call
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-from charmhelpers.core.host import mkdir
-
-
-if filter_installed_packages(['bzr']) != []:
- apt_install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
- else:
- return True
-
- def branch(self, source, dest, revno=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- cmd_opts = []
- if revno:
- cmd_opts += ['-r', str(revno)]
- if os.path.exists(dest):
- cmd = ['bzr', 'pull']
- cmd += cmd_opts
- cmd += ['--overwrite', '-d', dest, source]
- else:
- cmd = ['bzr', 'branch']
- cmd += cmd_opts
- cmd += [source, dest]
- check_call(cmd)
-
- def install(self, source, dest=None, revno=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
-
- if dest and not os.path.exists(dest):
- mkdir(dest, perms=0o755)
-
- try:
- self.branch(source, dest_dir, revno)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f708d1e..0000000
--- a/charms/trusty/ceilometer-agent/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-from subprocess import check_call, CalledProcessError
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-
-if filter_installed_packages(['git']) != []:
- apt_install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
- else:
- return True
-
- def clone(self, source, dest, branch="master", depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
- else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
- if depth:
- cmd.extend(['--depth', depth])
- check_call(cmd)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- try:
- self.clone(source, dest_dir, branch, depth)
- except CalledProcessError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer-agent/hooks/config-changed b/charms/trusty/ceilometer-agent/hooks/config-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/install b/charms/trusty/ceilometer-agent/hooks/install
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/nova-ceilometer-relation-joined b/charms/trusty/ceilometer-agent/hooks/nova-ceilometer-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/nova-ceilometer-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-changed b/charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-joined b/charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/nrpe-external-master-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/start b/charms/trusty/ceilometer-agent/hooks/start
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/stop b/charms/trusty/ceilometer-agent/hooks/stop
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/hooks/upgrade-charm b/charms/trusty/ceilometer-agent/hooks/upgrade-charm
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer-agent/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/icon.svg b/charms/trusty/ceilometer-agent/icon.svg
deleted file mode 100644
index 84de61c..0000000
--- a/charms/trusty/ceilometer-agent/icon.svg
+++ /dev/null
@@ -1,717 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- sodipodi:docname="openstack-ceilometer.svg"
- inkscape:version="0.48+devel r12825"
- version="1.1"
- id="svg6517"
- height="96"
- width="96">
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="2.7214607"
- inkscape:cy="63.792857"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1029"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="false"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false"
- inkscape:snap-global="true"
- inkscape:snap-bbox="true"
- inkscape:bbox-paths="true"
- inkscape:bbox-nodes="true"
- inkscape:snap-bbox-edge-midpoints="true"
- inkscape:snap-bbox-midpoints="true"
- inkscape:object-paths="true"
- inkscape:snap-intersection-paths="true"
- inkscape:object-nodes="true"
- inkscape:snap-smooth-nodes="true"
- inkscape:snap-midpoints="true"
- inkscape:snap-object-midpoints="true"
- inkscape:snap-center="true"
- inkscape:snap-grids="false"
- inkscape:snap-nodes="true"
- inkscape:snap-others="true">
- <inkscape:grid
- id="grid821"
- type="xygrid" />
- <sodipodi:guide
- id="guide823"
- position="18.34962,45.78585"
- orientation="1,0" />
- <sodipodi:guide
- id="guide827"
- position="78.02001,46.32673"
- orientation="1,0" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4184"
- position="65.586619,19.307"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4188"
- position="62.756032,71.583147"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4190"
- position="47.812194,78.049658"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- id="guide4194"
- position="25.60516,42.21665"
- orientation="1,0" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4202"
- position="25.60516,42.070975"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4204"
- position="25.60516,42.070975"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4242"
- position="51.81985,44.36226"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4252"
- position="73.5625,75.210937"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- inkscape:color="rgb(140,140,240)"
- id="guide4254"
- position="18.34962,75.472017"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4288"
- position="21.871042,21.577512"
- orientation="-0.70710678,-0.70710678" />
- </sodipodi:namedview>
- <defs
- id="defs6519">
- <filter
- id="filter1121"
- inkscape:label="Inner Shadow"
- style="color-interpolation-filters:sRGB;">
- <feFlood
- id="feFlood1123"
- result="flood"
- flood-color="rgb(0,0,0)"
- flood-opacity="0.59999999999999998" />
- <feComposite
- id="feComposite1125"
- result="composite1"
- operator="out"
- in2="SourceGraphic"
- in="flood" />
- <feGaussianBlur
- id="feGaussianBlur1127"
- result="blur"
- stdDeviation="1"
- in="composite1" />
- <feOffset
- id="feOffset1129"
- result="offset"
- dy="2"
- dx="0" />
- <feComposite
- id="feComposite1131"
- result="composite2"
- operator="atop"
- in2="SourceGraphic"
- in="offset" />
- </filter>
- <filter
- id="filter950"
- inkscape:label="Drop Shadow"
- style="color-interpolation-filters:sRGB;">
- <feFlood
- id="feFlood952"
- result="flood"
- flood-color="rgb(0,0,0)"
- flood-opacity="0.25" />
- <feComposite
- id="feComposite954"
- result="composite1"
- operator="in"
- in2="SourceGraphic"
- in="flood" />
- <feGaussianBlur
- id="feGaussianBlur956"
- result="blur"
- stdDeviation="1"
- in="composite1" />
- <feOffset
- id="feOffset958"
- result="offset"
- dy="1"
- dx="0" />
- <feComposite
- id="feComposite960"
- result="composite2"
- operator="over"
- in2="offset"
- in="SourceGraphic" />
- </filter>
- <filter
- inkscape:label="Badge Shadow"
- id="filter891"
- inkscape:collect="always">
- <feGaussianBlur
- id="feGaussianBlur893"
- stdDeviation="0.71999962"
- inkscape:collect="always" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter3831">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.86309522"
- id="feGaussianBlur3833" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter3868"
- x="-0.17186206"
- width="1.3437241"
- y="-0.1643077"
- height="1.3286154">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.62628186"
- id="feGaussianBlur3870" />
- </filter>
- <linearGradient
- id="linearGradient4328"
- inkscape:collect="always">
- <stop
- id="stop4330"
- offset="0"
- style="stop-color:#871f1c;stop-opacity:1;" />
- <stop
- id="stop4332"
- offset="1"
- style="stop-color:#651715;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- id="linearGradient902"
- inkscape:collect="always">
- <stop
- id="stop904"
- offset="0"
- style="stop-color:#cccccc;stop-opacity:1" />
- <stop
- id="stop906"
- offset="1"
- style="stop-color:#e6e6e6;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- id="Background">
- <stop
- style="stop-color:#22779e;stop-opacity:1"
- offset="0"
- id="stop4178" />
- <stop
- style="stop-color:#2991c0;stop-opacity:1"
- offset="1"
- id="stop4180" />
- </linearGradient>
- <clipPath
- id="clipPath873"
- clipPathUnits="userSpaceOnUse">
- <g
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- inkscape:label="Layer 1"
- id="g875"
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)">
- <path
- sodipodi:nodetypes="sssssssss"
- inkscape:connector-curvature="0"
- id="path877"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline" />
- </g>
- </clipPath>
- <style
- type="text/css"
- id="style867">
- .fil0 {fill:#1F1A17}
- </style>
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="635.29077"
- x2="-220"
- y1="731.29077"
- x1="-220"
- id="linearGradient908"
- xlink:href="#linearGradient902"
- inkscape:collect="always" />
- <clipPath
- id="clipPath16">
- <path
- d="m -9,-9 614,0 0,231 -614,0 0,-231 z"
- id="path18" />
- </clipPath>
- <clipPath
- id="clipPath116">
- <path
- d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
- id="path118" />
- </clipPath>
- <clipPath
- id="clipPath128">
- <path
- d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
- id="path130" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- id="linearGradient3850">
- <stop
- style="stop-color:#000000;stop-opacity:1;"
- offset="0"
- id="stop3852" />
- <stop
- style="stop-color:#000000;stop-opacity:0;"
- offset="1"
- id="stop3854" />
- </linearGradient>
- <clipPath
- id="clipPath3095"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3097"
- d="m 976.648,389.551 -842.402,0 0,839.999 842.402,0 0,-839.999" />
- </clipPath>
- <clipPath
- id="clipPath3195"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3197"
- d="m 611.836,756.738 -106.34,105.207 c -8.473,8.289 -13.617,20.102 -13.598,33.379 L 598.301,790.207 c -0.031,-13.418 5.094,-25.031 13.535,-33.469" />
- </clipPath>
- <clipPath
- id="clipPath3235"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3237"
- d="m 1095.64,1501.81 c 35.46,-35.07 70.89,-70.11 106.35,-105.17 4.4,-4.38 7.11,-10.53 7.11,-17.55 l -106.37,105.21 c 0,7 -2.71,13.11 -7.09,17.51" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- id="linearGradient4389">
- <stop
- style="stop-color:#871f1c;stop-opacity:1"
- offset="0"
- id="stop4391" />
- <stop
- style="stop-color:#c42e24;stop-opacity:1"
- offset="1"
- id="stop4393" />
- </linearGradient>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath4591">
- <path
- id="path4593"
- style="fill:#ff00ff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 1106.6009,730.43734 -0.036,21.648 c -0.01,3.50825 -2.8675,6.61375 -6.4037,6.92525 l -83.6503,7.33162 c -3.5205,0.30763 -6.3812,-2.29987 -6.3671,-5.8145 l 0.036,-21.6475 20.1171,-1.76662 -0.011,4.63775 c 0,1.83937 1.4844,3.19925 3.3262,3.0395 l 49.5274,-4.33975 c 1.8425,-0.166 3.3425,-1.78125 3.3538,-3.626 l 0.01,-4.63025 20.1,-1.7575"
- inkscape:connector-curvature="0" />
- </clipPath>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="radialGradient3856"
- cx="-26.508606"
- cy="93.399292"
- fx="-26.508606"
- fy="93.399292"
- r="20.40658"
- gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
- gradientUnits="userSpaceOnUse" />
- <filter
- inkscape:collect="always"
- id="filter3885">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="5.7442192"
- id="feGaussianBlur3887" />
- </filter>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="linearGradient3895"
- x1="348.20132"
- y1="593.11615"
- x2="-51.879555"
- y2="993.19702"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-318.48033,212.32022)" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="radialGradient3902"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
- cx="-26.508606"
- cy="93.399292"
- fx="-26.508606"
- fy="93.399292"
- r="20.40658" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="linearGradient3904"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-318.48033,212.32022)"
- x1="348.20132"
- y1="593.11615"
- x2="-51.879555"
- y2="993.19702" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="23.383789"
- x2="25.217773"
- y1="27.095703"
- x1="21.505859"
- id="linearGradient4318"
- xlink:href="#linearGradient4389"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="20.884073"
- x2="71.960243"
- y1="20.041777"
- x1="72.802544"
- id="linearGradient4326"
- xlink:href="#linearGradient4389"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="74.246689"
- x2="21.69179"
- y1="73.643555"
- x1="22.294922"
- id="linearGradient4334"
- xlink:href="#linearGradient4328"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="24.881023"
- x2="57.450542"
- y1="77.404816"
- x1="57.450542"
- id="linearGradient4319"
- xlink:href="#linearGradient4552"
- inkscape:collect="always" />
- <linearGradient
- id="linearGradient4552"
- inkscape:collect="always">
- <stop
- id="stop4554"
- offset="0"
- style="stop-color:#d93023;stop-opacity:1" />
- <stop
- id="stop4556"
- offset="1"
- style="stop-color:#e63f46;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4444"
- x1="-228.90239"
- y1="694.04291"
- x2="-223.99701"
- y2="687.45367"
- gradientUnits="userSpaceOnUse" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4469"
- x1="-216.46823"
- y1="693.81781"
- x2="-210.73438"
- y2="687.75952"
- gradientUnits="userSpaceOnUse" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4479"
- x1="-206.06966"
- y1="682.03033"
- x2="-199.5918"
- y2="675.95483"
- gradientUnits="userSpaceOnUse" />
- </defs>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- style="display:inline"
- transform="translate(268,-635.29076)"
- id="layer1"
- inkscape:groupmode="layer"
- inkscape:label="BACKGROUND">
- <path
- sodipodi:nodetypes="sssssssss"
- inkscape:connector-curvature="0"
- id="path6455"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- style="fill:url(#linearGradient908);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)" />
- <g
- id="g4336">
- <g
- transform="matrix(0.06790711,0,0,-0.06790711,-239.0411,765.68623)"
- id="g3897"
- xml:space="default">
- <path
- inkscape:connector-curvature="0"
- style="opacity:0.7;color:#000000;fill:url(#radialGradient3902);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3831);enable-background:accumulate"
- d="m -48.09375,67.8125 c -0.873996,-0.0028 -2.089735,0.01993 -3.40625,0.09375 -2.633031,0.147647 -5.700107,0.471759 -7.78125,1.53125 a 1.0001,1.0001 0 0 0 -0.25,1.59375 L -38.8125,92.375 a 1.0001,1.0001 0 0 0 0.84375,0.3125 L -24,90.5625 a 1.0001,1.0001 0 0 0 0.53125,-1.71875 L -46.0625,68.125 a 1.0001,1.0001 0 0 0 -0.625,-0.28125 c 0,0 -0.532254,-0.02842 -1.40625,-0.03125 z"
- transform="matrix(10.616011,0,0,-10.616011,357.98166,1725.8152)"
- id="path3821"
- xml:space="default" />
- <path
- style="opacity:0.6;color:#000000;fill:none;stroke:#000000;stroke-width:2.77429962;stroke-linecap:round;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3868);enable-background:accumulate"
- d="m -15.782705,81.725197 8.7458304,9.147937"
- id="path3858"
- inkscape:connector-curvature="0"
- transform="matrix(10.616011,0,0,-10.616011,39.50133,1725.8152)"
- xml:space="default" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;fill:url(#linearGradient3904);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3885);enable-background:accumulate;font-family:Sans;-inkscape-font-specification:Sans"
- d="m -95.18931,981.03569 a 10.617073,10.617073 0 0 1 -0.995251,-0.3318 l -42.795789,-5.308 a 10.617073,10.617073 0 0 1 -6.30326,-17.9145 L -4.2897203,812.5065 a 10.617073,10.617073 0 0 1 8.95726,-3.3175 l 49.0990503,7.63026 a 10.617073,10.617073 0 0 1 5.97151,17.91452 L -87.55905,978.04989 a 10.617073,10.617073 0 0 1 -7.63026,2.9858 z"
- id="path3874"
- inkscape:connector-curvature="0"
- xml:space="default" />
- </g>
- <path
- style="opacity:1;color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="M 20.697266 20.515625 C 19.336871 21.10204 18.348875 22.456253 18.345703 23.970703 L 18.351562 58.322266 L 28.798828 49.138672 C 28.677618 48.755492 28.614281 48.351433 28.613281 47.939453 C 28.613261 46.832713 29.052994 45.731919 29.833984 44.880859 C 30.614994 44.029779 31.672894 43.497091 32.777344 43.400391 C 34.625174 43.240001 36.250631 44.319081 36.769531 46.050781 L 42.583984 46.052734 C 42.966392 45.246167 43.574155 44.582164 44.298828 44.115234 L 20.697266 20.515625 z M 36.501953 49.099609 C 35.800103 50.580079 34.357634 51.603391 32.777344 51.744141 C 32.038304 51.807991 31.313171 51.674389 30.675781 51.355469 L 18.351562 62.191406 L 18.353516 69.601562 C 18.349848 70.477025 18.685456 71.239319 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.660156 79.126953 L 33.488281 71.738281 L 67.28125 68.777344 C 67.744386 68.736546 68.184049 68.603285 68.587891 68.404297 L 49.771484 49.589844 C 49.024593 50.774607 47.754946 51.625037 46.310547 51.751953 C 44.461497 51.913663 42.833613 50.834232 42.314453 49.101562 L 36.501953 49.099609 z "
- transform="translate(-268,635.29076)"
- id="path4308" />
- <path
- style="opacity:1;color:#000000;fill:#c42e24;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="M 67.320312 16.253906 L 21.822266 20.212891 C 21.426436 20.248017 21.052174 20.362637 20.697266 20.515625 L 44.298828 44.115234 C 44.9049 43.724723 45.592393 43.470157 46.3125 43.40625 C 46.79566 43.36596 47.274906 43.410199 47.728516 43.537109 L 53.324219 36.660156 C 53.017769 36.094616 52.857922 35.452456 52.857422 34.785156 C 52.857752 32.480326 54.723287 30.446511 57.023438 30.244141 C 59.119587 30.062831 60.885597 31.472453 61.148438 33.533203 L 70.771484 35.117188 L 70.771484 38.248047 L 70.775391 31.386719 L 77.232422 24.398438 L 69.892578 17.179688 L 69.884766 17.189453 C 69.251763 16.542736 68.342666 16.171306 67.320312 16.253906 z M 70.771484 38.248047 L 60.412109 36.541016 C 59.630809 37.708426 58.367804 38.472897 57.021484 38.591797 C 56.537844 38.632787 56.057726 38.589411 55.603516 38.462891 L 50.007812 45.337891 C 50.314462 45.903801 50.474339 46.547144 50.474609 47.214844 C 50.474197 48.071259 50.213409 48.888836 49.771484 49.589844 L 68.587891 68.404297 C 69.859183 67.777881 70.75673 66.462035 70.759766 65.015625 L 70.771484 38.248047 z "
- transform="translate(-268,635.29076)"
- id="path4233" />
- <rect
- xml:space="default"
- y="648.49109"
- x="-258.70667"
- height="69.20665"
- width="69.20665"
- id="rect3585-3"
- style="opacity:0.8;color:#000000;fill:none;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4318);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:5.25;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
- d="M 22.029297 20.195312 L 21.822266 20.212891 C 19.919838 20.381715 18.370776 22.043134 18.349609 23.939453 L 24.662109 30.251953 L 25.605469 31.195312 L 25.605469 31.103516 C 25.609469 29.193966 27.168951 27.515473 29.082031 27.345703 L 29.171875 27.337891 L 28.373047 26.539062 L 22.029297 20.195312 z "
- transform="translate(-268,635.29076)"
- id="path4256" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.53600003;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4326);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2.4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
- d="M 67.330078 16.253906 L 68.03125 16.955078 L 74.472656 23.396484 L 74.580078 23.386719 C 75.531927 23.309814 76.390588 23.620657 77.015625 24.185547 L 69.892578 17.179688 L 69.884766 17.189453 C 69.253843 16.544862 68.348328 16.174551 67.330078 16.253906 z M 77.054688 24.222656 C 77.115589 24.279686 77.164628 24.348282 77.220703 24.410156 L 77.232422 24.398438 L 77.054688 24.222656 z "
- transform="translate(-268,635.29076)"
- id="path4272" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4334);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:1.7;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
- d="M 18.363281 69.712891 C 18.387957 70.540342 18.709001 71.264013 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.599609 79.068359 C 26.044831 78.550125 25.698241 77.821152 25.638672 76.988281 L 18.951172 70.298828 L 18.363281 69.712891 z M 26.636719 79.103516 L 26.660156 79.126953 L 26.664062 79.123047 C 26.655656 79.11562 26.645042 79.111033 26.636719 79.103516 z "
- transform="translate(-268,635.29076)"
- id="path4290" />
- <path
- style="opacity:1;color:#000000;fill:#96231e;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -249.52901,697.37716 7.14034,7.23587 12.32422,-10.83594 -7.25977,-7.13086 z"
- id="path4428"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -249.64844,693.61303 7.25977,7.13085 5.06445,-14.09765 -7.65515,-5.41781 z"
- id="path4426"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#96231e;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -231.49805,684.39037 7.25977,7.13086 5.8125,0.002 -7.25977,-7.13086 z"
- id="path4430"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4469);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -225.68555,684.39232 7.25977,7.13091 -0.51715,5.74927 8.04214,0.24126 4.42318,-7.15751 -4.25676,-2.59674 -7.25782,-7.13086 z"
- id="path4446"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#9d241f;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -210.73437,687.75951 -7.25782,-7.13086 5.59571,-6.875 7.25976,7.13086 z"
- id="path4432"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#9d241f;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -207.58789,671.83178 7.25781,7.13281 10.36133,1.70508 -7.25977,-7.13086 z"
- id="path4434"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4444);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -231.49805,684.39037 7.25977,7.13086 -0.0125,5.42958 -5.81371,-3.17372 -7.25977,-7.13086 z"
- id="path4436"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4479);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -205.13672,680.88451 -7.25976,-7.13086 4.80859,-1.92187 7.25781,7.13281 -0.59845,5.11879 z"
- id="path4471"
- inkscape:connector-curvature="0" />
- <path
- style="fill:url(#linearGradient4319);fill-opacity:1;fill-rule:nonzero;stroke:none;display:inline;opacity:1"
- d="M 74.580078 23.386719 L 29.082031 27.345703 C 27.168951 27.515473 25.609469 29.193966 25.605469 31.103516 L 25.611328 65.453125 L 36.058594 56.269531 C 35.937384 55.886351 35.874047 55.482292 35.873047 55.070312 C 35.873027 53.963572 36.31276 52.862779 37.09375 52.011719 C 37.87476 51.160639 38.932659 50.62795 40.037109 50.53125 C 41.884939 50.37086 43.510397 51.449941 44.029297 53.181641 L 49.84375 53.183594 C 50.54598 51.702464 51.989182 50.677429 53.570312 50.537109 C 54.053473 50.496819 54.532718 50.541059 54.986328 50.667969 L 60.583984 43.792969 C 60.277534 43.227429 60.117687 42.583316 60.117188 41.916016 C 60.117518 39.611186 61.983053 37.579323 64.283203 37.376953 C 66.379353 37.195643 68.145363 38.603313 68.408203 40.664062 L 78.03125 42.248047 L 78.03125 45.378906 L 78.041016 26.554688 C 78.044016 24.640387 76.496418 23.231889 74.580078 23.386719 z M 78.03125 45.378906 L 67.669922 43.673828 C 66.888622 44.841238 65.625617 45.603756 64.279297 45.722656 C 63.795657 45.763646 63.317491 45.72027 62.863281 45.59375 L 57.265625 52.46875 C 57.572275 53.03466 57.734105 53.678003 57.734375 54.345703 C 57.733265 56.649533 55.869342 58.680803 53.570312 58.882812 C 51.721263 59.044523 50.093379 57.965092 49.574219 56.232422 L 43.761719 56.230469 C 43.059869 57.710939 41.617399 58.736203 40.037109 58.876953 C 39.298069 58.940803 38.572937 58.805248 37.935547 58.486328 L 25.611328 69.322266 L 25.613281 76.734375 C 25.605281 78.643715 27.159736 80.061651 29.072266 79.894531 L 74.541016 75.910156 C 76.462106 75.740926 78.015531 74.054318 78.019531 72.148438 L 78.03125 45.378906 z "
- transform="translate(-268,635.29076)"
- id="path4633-5" />
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;display:inline;opacity:0.3"
- d="M 74.580078 23.386719 L 29.082031 27.345703 C 27.168951 27.515473 25.609469 29.193966 25.605469 31.103516 L 25.605469 31.302734 C 25.609469 29.393184 27.168951 27.714692 29.082031 27.544922 L 74.580078 23.585938 C 76.495748 23.431162 78.042931 24.838676 78.041016 26.751953 L 78.041016 26.554688 C 78.044016 24.640387 76.496418 23.231889 74.580078 23.386719 z M 60.125 42.041016 C 60.124551 42.066189 60.117191 42.089993 60.117188 42.115234 C 60.117651 42.734094 60.260989 43.33044 60.525391 43.865234 L 60.583984 43.792969 C 60.296842 43.263061 60.144234 42.663329 60.125 42.041016 z M 67.669922 43.673828 C 66.888622 44.841238 65.625617 45.603756 64.279297 45.722656 C 63.795657 45.763646 63.317491 45.72027 62.863281 45.59375 L 57.265625 52.46875 C 57.287847 52.509761 57.303524 52.553899 57.324219 52.595703 L 62.863281 45.792969 C 63.317491 45.919879 63.795657 45.962875 64.279297 45.921875 C 65.625617 45.802975 66.888622 45.040457 67.669922 43.873047 L 78.03125 45.578125 L 78.03125 45.378906 L 67.669922 43.673828 z M 57.726562 54.419922 C 57.68528 56.692433 55.844165 58.683016 53.570312 58.882812 C 51.721263 59.044523 50.093379 57.965092 49.574219 56.232422 L 43.761719 56.230469 C 43.059869 57.710939 41.617399 58.736203 40.037109 58.876953 C 39.298069 58.940803 38.572937 58.805248 37.935547 58.486328 L 25.611328 69.322266 L 25.611328 69.521484 L 37.935547 58.685547 C 38.572937 59.004467 39.298069 59.140072 40.037109 59.076172 C 41.617399 58.935422 43.059869 57.910157 43.761719 56.429688 L 49.574219 56.431641 C 50.093379 58.164311 51.721262 59.243741 53.570312 59.082031 C 55.869343 58.880021 57.733375 56.848752 57.734375 54.544922 C 57.734358 54.502688 57.727868 54.461948 57.726562 54.419922 z M 35.880859 55.181641 C 35.880238 55.211108 35.873046 55.24005 35.873047 55.269531 C 35.873907 55.623997 35.929222 55.970202 36.019531 56.304688 L 36.058594 56.269531 C 35.948415 55.921224 35.892049 55.554719 35.880859 55.181641 z "
- transform="translate(-268,635.29076)"
- id="path4481" />
- </g>
- <path
- sodipodi:nodetypes="ccccc"
- xml:space="default"
- inkscape:connector-curvature="0"
- id="path4181"
- d="m -99.97999,710.89598 0.0188,-52.54136 -52.43365,4.60327 0,52.52379 z"
- style="color:#000000;fill:#ffff00;fill-opacity:0.47839511;fill-rule:nonzero;stroke:none;stroke-width:1.70000005;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
- <rect
- xml:space="default"
- style="opacity:0.8;color:#000000;fill:#ff00ff;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="rect3908"
- width="69.206657"
- height="69.206657"
- x="279.50037"
- y="648.49109"
- transform="scale(-1,1)" />
- <path
- id="path100"
- d="m -113.71761,672.66709 c -2.30015,0.20237 -4.16481,2.23468 -4.16514,4.53951 5e-4,0.6673 0.16069,1.31071 0.46714,1.87625 0,0 -5.59737,6.87651 -5.59737,6.87651 -0.45361,-0.12691 -0.93271,-0.1712 -1.41587,-0.13091 -1.58113,0.14032 -3.02501,1.16568 -3.72724,2.64681 0,0 -5.81396,-0.002 -5.81396,-0.002 -0.5189,-1.7317 -2.14454,-2.81099 -3.99237,-2.6506 -1.10445,0.0967 -2.16372,0.62919 -2.94473,1.48027 -0.78099,0.85106 -1.21974,1.95097 -1.21972,3.05771 0.001,0.41198 0.0636,0.81614 0.18481,1.19932 0,0 -10.45278,9.18929 -10.45278,9.18929 0,0 0,3.8696 0,3.8696 0,0 12.3297,-10.84125 12.3297,-10.84125 0.63739,0.31892 1.36286,0.45336 2.1019,0.38951 1.58029,-0.14075 3.02321,-1.16576 3.72506,-2.64623 0,0 5.81233,0.002 5.81233,0.002 0.51916,1.73267 2.14635,2.81208 3.9954,2.65037 2.29903,-0.20201 4.16306,-2.23263 4.16417,-4.53646 -2.7e-4,-0.6677 -0.16047,-1.31155 -0.46712,-1.87746 0,0 5.59606,-6.87475 5.59606,-6.87475 0.45421,0.12652 0.93388,0.17026 1.41752,0.12927 1.34632,-0.1189 2.60939,-0.8825 3.39069,-2.04991 0,0 10.35996,1.70595 10.35996,1.70595 0,0 0.001,-3.13044 0.001,-3.13044 0,0 -9.62326,-1.58511 -9.62326,-1.58511 -0.26284,-2.06075 -2.03015,-3.46879 -4.1263,-3.28748 0,0 0,0 0,0"
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;color-interpolation:sRGB;color-interpolation-filters:linearRGB;fill:#000000;fill-opacity:1;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
- inkscape:connector-curvature="0" />
- </g>
- <g
- style="display:inline"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- id="layer3"
- inkscape:groupmode="layer" />
- <g
- sodipodi:insensitive="true"
- style="display:none"
- inkscape:label="BADGE"
- id="layer2"
- inkscape:groupmode="layer">
- <g
- clip-path="none"
- id="g4394"
- transform="translate(-340.00001,-581)"
- style="display:inline">
- <g
- id="g855">
- <g
- style="opacity:0.6;filter:url(#filter891)"
- clip-path="url(#clipPath873)"
- id="g870"
- inkscape:groupmode="maskhelper">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path844"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)" />
- </g>
- <g
- id="g862">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4398"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4400"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.25,0,0,1.25,33,-100.45273)" />
- <path
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- inkscape:randomized="0"
- inkscape:rounded="0.1"
- inkscape:flatsided="false"
- sodipodi:arg2="1.6755161"
- sodipodi:arg1="1.0471976"
- sodipodi:r2="4.3458705"
- sodipodi:r1="7.2431178"
- sodipodi:cy="589.50385"
- sodipodi:cx="666.19574"
- sodipodi:sides="5"
- id="path4459"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="star" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/ceilometer-agent/local.yaml b/charms/trusty/ceilometer-agent/local.yaml
deleted file mode 100644
index 9df185f..0000000
--- a/charms/trusty/ceilometer-agent/local.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-ceilometer-agent:
- openstack-origin: "ppa:openstack-ubuntu-testing/grizzly-trunk-testing"
- config-file: "/etc/nova/nova.conf"
diff --git a/charms/trusty/ceilometer-agent/metadata.yaml b/charms/trusty/ceilometer-agent/metadata.yaml
deleted file mode 100644
index 8ff00ab..0000000
--- a/charms/trusty/ceilometer-agent/metadata.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-name: ceilometer-agent
-subordinate: true
-summary: Subordinate charm for deploying Ceilometer compute agent
-maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
-description: |
- Ceilometer project aims to become the infrastructure to collect measurements
- within OpenStack so that no two agents would need to be written to collect
- the same data. It's primary targets are monitoring and metering, but the
- framework should be easily expandable to collect for other needs. To that
- effect, Ceilometer should be able to share collected data with a variety of
- consumers.
- .
- This charm should be used in conjunction with the ceilometer and nova-compute
- charms to collect OpenStack measurements.
-tags:
- - openstack
- - telemetry
- - misc
-provides:
- nrpe-external-master:
- interface: nrpe-external-master
- scope: container
- nova-ceilometer:
- interface: nova-ceilometer
- scope: container
-requires:
- container:
- interface: juju-info
- scope: container
- ceilometer-service:
- interface: ceilometer
diff --git a/charms/trusty/ceilometer-agent/requirements.txt b/charms/trusty/ceilometer-agent/requirements.txt
deleted file mode 100644
index 426002d..0000000
--- a/charms/trusty/ceilometer-agent/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-PyYAML>=3.1.0
-simplejson>=2.2.0
-netifaces>=0.10.4
-netaddr>=0.7.12,!=0.7.16
-Jinja2>=2.6 # BSD License (3 clause)
-six>=1.9.0
-dnspython>=1.12.0
-psutil>=1.1.1,<2.0.0
diff --git a/charms/trusty/ceilometer-agent/revision b/charms/trusty/ceilometer-agent/revision
deleted file mode 100644
index 209e3ef..0000000
--- a/charms/trusty/ceilometer-agent/revision
+++ /dev/null
@@ -1 +0,0 @@
-20
diff --git a/charms/trusty/ceilometer-agent/setup.cfg b/charms/trusty/ceilometer-agent/setup.cfg
deleted file mode 100644
index bb0670f..0000000
--- a/charms/trusty/ceilometer-agent/setup.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-[nosetests]
-verbosity=1
-with-coverage=1
-cover-erase=1
-cover-package=hooks
-
diff --git a/charms/trusty/ceilometer-agent/templates/icehouse/ceilometer.conf b/charms/trusty/ceilometer-agent/templates/icehouse/ceilometer.conf
deleted file mode 100644
index ed778f0..0000000
--- a/charms/trusty/ceilometer-agent/templates/icehouse/ceilometer.conf
+++ /dev/null
@@ -1,33 +0,0 @@
-# icehouse
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-logdir = /var/log/ceilometer
-{% include "parts/rabbitmq" %}
-{% if use_internal_endpoints -%}
-interface = internal
-{% endif -%}
-
-{% if auth_host -%}
-[service_credentials]
-os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-{% endif -%}
-
-{% if metering_secret -%}
-[publisher_rpc]
-metering_secret = {{ metering_secret }}
-{% endif -%}
-
-[database]
-# NOTE(jamespage) this allows the db sync process to run OK for upgrades
-# fixed in icehouse
-backend=sqlalchemy
-connection=sqlite:////var/lib/ceilometer/$sqlite_db
diff --git a/charms/trusty/ceilometer-agent/templates/parts/rabbitmq b/charms/trusty/ceilometer-agent/templates/parts/rabbitmq
deleted file mode 100644
index bbd0371..0000000
--- a/charms/trusty/ceilometer-agent/templates/parts/rabbitmq
+++ /dev/null
@@ -1,21 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%} \ No newline at end of file
diff --git a/charms/trusty/ceilometer-agent/test-requirements.txt b/charms/trusty/ceilometer-agent/test-requirements.txt
deleted file mode 100644
index 74baa12..0000000
--- a/charms/trusty/ceilometer-agent/test-requirements.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-coverage>=3.6
-mock>=1.2
-flake8>=2.2.4,<=2.4.1
-os-testr>=0.4.1
-charm-tools>=2.0.0
-requests==2.6.0
-# BEGIN: Amulet OpenStack Charm Helper Requirements
-# Liberty client lower constraints
-amulet>=1.14.3,<2.0
-bundletester>=0.6.1,<1.0
-python-ceilometerclient>=1.5.0,<2.0
-python-cinderclient>=1.4.0,<2.0
-python-glanceclient>=1.1.0,<2.0
-python-heatclient>=0.8.0,<1.0
-python-keystoneclient>=1.7.1,<2.0
-python-neutronclient>=3.1.0,<4.0
-python-novaclient>=2.30.1,<3.0
-python-openstackclient>=1.7.0,<2.0
-python-swiftclient>=2.6.0,<3.0
-pika>=0.10.0,<1.0
-distro-info
-# END: Amulet OpenStack Charm Helper Requirements
diff --git a/charms/trusty/ceilometer-agent/tests/README.md b/charms/trusty/ceilometer-agent/tests/README.md
deleted file mode 100644
index 046be7f..0000000
--- a/charms/trusty/ceilometer-agent/tests/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Overview
-
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
-
-For full details on functional testing of OpenStack charms please refer to
-the [functional testing](http://docs.openstack.org/developer/charm-guide/testing.html#functional-testing)
-section of the OpenStack Charm Guide.
diff --git a/charms/trusty/ceilometer-agent/tests/basic_deployment.py b/charms/trusty/ceilometer-agent/tests/basic_deployment.py
deleted file mode 100644
index 4be7285..0000000
--- a/charms/trusty/ceilometer-agent/tests/basic_deployment.py
+++ /dev/null
@@ -1,678 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import subprocess
-import amulet
-import json
-import time
-import ceilometerclient.v2.client as ceilo_client
-
-from charmhelpers.contrib.openstack.amulet.deployment import (
- OpenStackAmuletDeployment
-)
-
-from charmhelpers.contrib.openstack.amulet.utils import (
- OpenStackAmuletUtils,
- DEBUG,
- # ERROR
-)
-
-# Use DEBUG to turn on debug logging
-u = OpenStackAmuletUtils(DEBUG)
-
-
-class CeiloAgentBasicDeployment(OpenStackAmuletDeployment):
- """Amulet tests on a basic ceilometer-agent deployment."""
-
- def __init__(self, series, openstack=None, source=None, stable=True):
- """Deploy the entire test environment."""
- super(CeiloAgentBasicDeployment, self).__init__(series, openstack,
- source, stable)
- self._add_services()
- self._add_relations()
- self._configure_services()
- self._deploy()
-
- u.log.info('Waiting on extended status checks...')
- exclude_services = ['mysql', 'mongodb']
- self._auto_wait_for_status(exclude_services=exclude_services)
-
- self._initialize_tests()
-
- def _add_services(self):
- """Add services
-
- Add the services that we're testing, where ceilometer is local,
- and the rest of the service are from lp branches that are
- compatible with the local charm (e.g. stable or next).
- """
- # Note: ceilometer-agent becomes a subordinate of nova-compute
- this_service = {'name': 'ceilometer-agent'}
- other_services = [{'name': 'mysql'},
- {'name': 'rabbitmq-server'},
- {'name': 'keystone'},
- {'name': 'mongodb'},
- {'name': 'glance'}, # to satisfy workload status
- {'name': 'ceilometer'},
- {'name': 'nova-compute'}]
- super(CeiloAgentBasicDeployment, self)._add_services(this_service,
- other_services)
-
- def _add_relations(self):
- """Add all of the relations for the services."""
- relations = {
- 'ceilometer:shared-db': 'mongodb:database',
- 'ceilometer:amqp': 'rabbitmq-server:amqp',
- 'ceilometer:identity-service': 'keystone:identity-service',
- 'ceilometer:identity-notifications': 'keystone:'
- 'identity-notifications',
- 'keystone:shared-db': 'mysql:shared-db',
- 'ceilometer:ceilometer-service': 'ceilometer-agent:'
- 'ceilometer-service',
- 'nova-compute:nova-ceilometer': 'ceilometer-agent:nova-ceilometer',
- 'nova-compute:shared-db': 'mysql:shared-db',
- 'nova-compute:amqp': 'rabbitmq-server:amqp',
- 'glance:identity-service': 'keystone:identity-service',
- 'glance:shared-db': 'mysql:shared-db',
- 'glance:amqp': 'rabbitmq-server:amqp',
- 'nova-compute:image-service': 'glance:image-service'
- }
- super(CeiloAgentBasicDeployment, self)._add_relations(relations)
-
- def _configure_services(self):
- """Configure all of the services."""
- keystone_config = {'admin-password': 'openstack',
- 'admin-token': 'ubuntutesting'}
- configs = {'keystone': keystone_config}
- super(CeiloAgentBasicDeployment, self)._configure_services(configs)
-
- def _get_token(self):
- return self.keystone.service_catalog.catalog['token']['id']
-
- def _initialize_tests(self):
- """Perform final initialization before tests get run."""
- # Access the sentries for inspecting service units
- self.ceil_sentry = self.d.sentry['ceilometer'][0]
- self.ceil_agent_sentry = self.d.sentry['ceilometer-agent'][0]
- self.mysql_sentry = self.d.sentry['mysql'][0]
- self.keystone_sentry = self.d.sentry['keystone'][0]
- self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]
- self.mongodb_sentry = self.d.sentry['mongodb'][0]
- self.nova_sentry = self.d.sentry['nova-compute'][0]
- u.log.debug('openstack release val: {}'.format(
- self._get_openstack_release()))
- u.log.debug('openstack release str: {}'.format(
- self._get_openstack_release_string()))
-
- # Authenticate admin with keystone endpoint
- self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
- user='admin',
- password='openstack',
- tenant='admin')
-
- # Authenticate admin with ceilometer endpoint
- ep = self.keystone.service_catalog.url_for(service_type='metering',
- endpoint_type='publicURL')
- os_token = self.keystone.auth_token
- self.log.debug('Instantiating ceilometer client...')
- self.ceil = ceilo_client.Client(endpoint=ep, token=os_token)
-
- def _run_action(self, unit_id, action, *args):
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- command.extend(args)
- print("Running command: %s\n" % " ".join(command))
- output = subprocess.check_output(command)
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- action_id = data[u'Action queued with id']
- return action_id
-
- def _wait_on_action(self, action_id):
- command = ["juju", "action", "fetch", "--format=json", action_id]
- while True:
- try:
- output = subprocess.check_output(command)
- except Exception as e:
- print(e)
- return False
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- if data[u"status"] == "completed":
- return True
- elif data[u"status"] == "failed":
- return False
- time.sleep(2)
-
- def test_100_services(self):
- """Verify the expected services are running on the corresponding
- service units."""
- u.log.debug('Checking system services on units...')
-
- ceilometer_svcs = [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-agent-notification',
- ]
-
- if self._get_openstack_release() < self.trusty_mitaka:
- ceilometer_svcs.append('ceilometer-alarm-evaluator')
- ceilometer_svcs.append('ceilometer-alarm-notifier')
-
- service_names = {
- self.ceil_sentry: ceilometer_svcs,
- }
-
- ret = u.validate_services_by_name(service_names)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_110_service_catalog(self):
- """Verify that the service catalog endpoint data is valid."""
- u.log.debug('Checking keystone service catalog data...')
- endpoint_check = {
- 'adminURL': u.valid_url,
- 'id': u.not_null,
- 'region': 'RegionOne',
- 'publicURL': u.valid_url,
- 'internalURL': u.valid_url
- }
- expected = {
- 'metering': [endpoint_check],
- 'identity': [endpoint_check]
- }
- actual = self.keystone.service_catalog.get_endpoints()
-
- ret = u.validate_svc_catalog_endpoint_data(expected, actual)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_112_keystone_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking keystone api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- internal_port = public_port = '5000'
- admin_port = '35357'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Keystone endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_114_ceilometer_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking ceilometer api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- admin_port = internal_port = public_port = '8777'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Ceilometer endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_200_ceilometer_identity_relation(self):
- """Verify the ceilometer to keystone identity-service relation data"""
- u.log.debug('Checking ceilometer to keystone identity-service '
- 'relation data...')
- unit = self.ceil_sentry
- relation = ['identity-service', 'keystone:identity-service']
- ceil_ip = unit.relation('identity-service',
- 'keystone:identity-service')['private-address']
- ceil_endpoint = "http://%s:8777" % (ceil_ip)
-
- expected = {
- 'admin_url': ceil_endpoint,
- 'internal_url': ceil_endpoint,
- 'private-address': ceil_ip,
- 'public_url': ceil_endpoint,
- 'region': 'RegionOne',
- 'requested_roles': 'ResellerAdmin',
- 'service': 'ceilometer',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_201_keystone_ceilometer_identity_relation(self):
- """Verify the keystone to ceilometer identity-service relation data"""
- u.log.debug('Checking keystone:ceilometer identity relation data...')
- unit = self.keystone_sentry
- relation = ['identity-service', 'ceilometer:identity-service']
- id_relation = unit.relation('identity-service',
- 'ceilometer:identity-service')
- id_ip = id_relation['private-address']
- expected = {
- 'admin_token': 'ubuntutesting',
- 'auth_host': id_ip,
- 'auth_port': "35357",
- 'auth_protocol': 'http',
- 'private-address': id_ip,
- 'service_host': id_ip,
- 'service_password': u.not_null,
- 'service_port': "5000",
- 'service_protocol': 'http',
- 'service_tenant': 'services',
- 'service_tenant_id': u.not_null,
- 'service_username': 'ceilometer',
- }
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('keystone identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_202_keystone_ceilometer_identity_notes_relation(self):
- """Verify ceilometer to keystone identity-notifications relation"""
- u.log.debug('Checking keystone:ceilometer '
- 'identity-notifications relation data...')
-
- # Relation data may vary depending on timing of hooks and relations.
- # May be glance- or keystone- or another endpoint-changed value, so
- # check that at least one ???-endpoint-changed value exists.
- unit = self.keystone_sentry
- relation_data = unit.relation('identity-service',
- 'ceilometer:identity-notifications')
-
- expected = '-endpoint-changed'
- found = 0
- for key in relation_data.keys():
- if expected in key and relation_data[key]:
- found += 1
- u.log.debug('{}: {}'.format(key, relation_data[key]))
-
- if not found:
- message = ('keystone:ceilometer identity-notification relation '
- 'error\n expected something like: {}\n actual: '
- '{}'.format(expected, relation_data))
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_203_ceilometer_amqp_relation(self):
- """Verify the ceilometer to rabbitmq-server amqp relation data"""
- u.log.debug('Checking ceilometer:rabbitmq amqp relation data...')
- unit = self.ceil_sentry
- relation = ['amqp', 'rabbitmq-server:amqp']
- expected = {
- 'username': 'ceilometer',
- 'private-address': u.valid_ip,
- 'vhost': 'openstack'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_204_amqp_ceilometer_relation(self):
- """Verify the rabbitmq-server to ceilometer amqp relation data"""
- u.log.debug('Checking rabbitmq:ceilometer amqp relation data...')
- unit = self.rabbitmq_sentry
- relation = ['amqp', 'ceilometer:amqp']
- expected = {
- 'hostname': u.valid_ip,
- 'private-address': u.valid_ip,
- 'password': u.not_null,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('rabbitmq amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_205_ceilometer_to_mongodb_relation(self):
- """Verify the ceilometer to mongodb relation data"""
- u.log.debug('Checking ceilometer:mongodb relation data...')
- unit = self.ceil_sentry
- relation = ['shared-db', 'mongodb:database']
- expected = {
- 'ceilometer_database': 'ceilometer',
- 'private-address': u.valid_ip,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer shared-db', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_206_mongodb_to_ceilometer_relation(self):
- """Verify the mongodb to ceilometer relation data"""
- u.log.debug('Checking mongodb:ceilometer relation data...')
- unit = self.mongodb_sentry
- relation = ['database', 'ceilometer:shared-db']
- expected = {
- 'hostname': u.valid_ip,
- 'port': '27017',
- 'private-address': u.valid_ip,
- 'type': 'database',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('mongodb database', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_207_ceilometer_ceilometer_agent_relation(self):
- """Verify the ceilometer to ceilometer-agent relation data"""
- u.log.debug('Checking ceilometer:ceilometer-agent relation data...')
- unit = self.ceil_sentry
- relation = ['ceilometer-service',
- 'ceilometer-agent:ceilometer-service']
- expected = {
- 'rabbitmq_user': 'ceilometer',
- 'verbose': 'False',
- 'rabbitmq_host': u.valid_ip,
- 'service_ports': "{'ceilometer_api': [8777, 8767]}",
- 'use_syslog': 'False',
- 'metering_secret': u.not_null,
- 'rabbitmq_virtual_host': 'openstack',
- 'db_port': '27017',
- 'private-address': u.valid_ip,
- 'db_name': 'ceilometer',
- 'db_host': u.valid_ip,
- 'debug': 'False',
- 'rabbitmq_password': u.not_null,
- 'port': '8767'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_208_ceilometer_agent_ceilometer_relation(self):
- """Verify the ceilometer-agent to ceilometer relation data"""
- u.log.debug('Checking ceilometer-agent:ceilometer relation data...')
- unit = self.ceil_agent_sentry
- relation = ['ceilometer-service', 'ceilometer:ceilometer-service']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_209_nova_compute_ceilometer_agent_relation(self):
- """Verify the nova-compute to ceilometer relation data"""
- u.log.debug('Checking nova-compute:ceilometer relation data...')
- unit = self.nova_sentry
- relation = ['nova-ceilometer', 'ceilometer-agent:nova-ceilometer']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_210_ceilometer_agent_nova_compute_relation(self):
- """Verify the ceilometer to nova-compute relation data"""
- u.log.debug('Checking ceilometer:nova-compute relation data...')
- unit = self.ceil_agent_sentry
- relation = ['nova-ceilometer', 'nova-compute:nova-ceilometer']
- sub = ('{"nova": {"/etc/nova/nova.conf": {"sections": {"DEFAULT": '
- '[["instance_usage_audit", "True"], '
- '["instance_usage_audit_period", "hour"], '
- '["notify_on_state_change", "vm_and_task_state"], '
- '["notification_driver", "ceilometer.compute.nova_notifier"], '
- '["notification_driver", '
- '"nova.openstack.common.notifier.rpc_notifier"]]}}}}')
- expected = {
- 'subordinate_configuration': sub,
- 'private-address': u.valid_ip
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_300_ceilometer_config(self):
- """Verify the data in the ceilometer config file."""
- u.log.debug('Checking ceilometer config file data...')
- unit = self.ceil_sentry
- ks_rel = self.keystone_sentry.relation('identity-service',
- 'ceilometer:identity-service')
- auth_uri = '%s://%s:%s/' % (ks_rel['service_protocol'],
- ks_rel['service_host'],
- ks_rel['service_port'])
- db_relation = self.mongodb_sentry.relation('database',
- 'ceilometer:shared-db')
- db_conn = 'mongodb://%s:%s/ceilometer' % (db_relation['hostname'],
- db_relation['port'])
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- },
- 'api': {
- 'port': '8767',
- },
- 'service_credentials': {
- 'os_auth_url': auth_uri + 'v2.0',
- 'os_tenant_name': 'services',
- 'os_username': 'ceilometer',
- 'os_password': ks_rel['service_password'],
- },
- 'database': {
- 'connection': db_conn,
- },
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_301_nova_config(self):
- """Verify data in the nova compute nova config file"""
- u.log.debug('Checking nova compute config file...')
- unit = self.nova_sentry
- conf = '/etc/nova/nova.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- 'my_ip': u.valid_ip,
- }
- }
-
- # NOTE(beisner): notification_driver is not checked like the
- # others, as configparser does not support duplicate config
- # options, and dicts cant have duplicate keys.
- # Ex. from conf file:
- # notification_driver = ceilometer.compute.nova_notifier
- # notification_driver = nova.openstack.common.notifier.rpc_notifier
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- # Check notification_driver existence via simple grep cmd
- lines = [('notification_driver = '
- 'ceilometer.compute.nova_notifier'),
- ('notification_driver = '
- 'nova.openstack.common.notifier.rpc_notifier')]
-
- sentry_units = [unit]
- cmds = []
- for line in lines:
- cmds.append('grep "{}" {}'.format(line, conf))
-
- ret = u.check_commands_on_units(cmds, sentry_units)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_302_nova_ceilometer_config(self):
- """Verify data in the ceilometer config file on the
- nova-compute (ceilometer-agent) unit."""
- u.log.debug('Checking nova ceilometer config file...')
- unit = self.nova_sentry
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'logdir': '/var/log/ceilometer'
- },
- 'database': {
- 'backend': 'sqlalchemy',
- 'connection': 'sqlite:////var/lib/ceilometer/$sqlite_db'
- }
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_400_api_connection(self):
- """Simple api calls to check service is up and responding"""
- u.log.debug('Checking api functionality...')
- assert(self.ceil.samples.list() == [])
- assert(self.ceil.meters.list() == [])
- u.log.debug('OK')
-
- # NOTE(beisner): need to add more functional tests
-
- def test_900_restart_on_config_change(self):
- """Verify that the specified services are restarted when the config
- is changed.
- """
- sentry = self.ceil_sentry
- juju_service = 'ceilometer'
-
- # Expected default and alternate values
- set_default = {'debug': 'False'}
- set_alternate = {'debug': 'True'}
-
- # Services which are expected to restart upon config change,
- # and corresponding config files affected by the change
- conf_file = '/etc/ceilometer/ceilometer.conf'
- services = {
- 'ceilometer-collector': conf_file,
- 'ceilometer-api': conf_file,
- 'ceilometer-agent-notification': conf_file,
- }
-
- if self._get_openstack_release() < self.trusty_mitaka:
- services['ceilometer-alarm-notifier'] = conf_file
- services['ceilometer-alarm-evaluator'] = conf_file
-
- if self._get_openstack_release() == self.trusty_liberty or \
- self._get_openstack_release() >= self.wily_liberty:
- # Liberty and later
- services['ceilometer-polling'] = conf_file
- else:
- # Juno and earlier
- services['ceilometer-agent-central'] = conf_file
-
- # Make config change, check for service restarts
- u.log.debug('Making config change on {}...'.format(juju_service))
- mtime = u.get_sentry_time(sentry)
- self.d.configure(juju_service, set_alternate)
-
- sleep_time = 40
- for s, conf_file in services.iteritems():
- u.log.debug("Checking that service restarted: {}".format(s))
- if not u.validate_service_config_changed(sentry, mtime, s,
- conf_file,
- retry_count=4,
- retry_sleep_time=20,
- sleep_time=sleep_time):
- self.d.configure(juju_service, set_default)
- msg = "service {} didn't restart after config change".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
- sleep_time = 0
-
- self.d.configure(juju_service, set_default)
- u.log.debug('OK')
-
- def test_910_pause_and_resume(self):
- """The services can be paused and resumed. """
- u.log.debug('Checking pause and resume actions...')
- unit = self.d.sentry['ceilometer-agent'][0]
- unit_name = unit.info['unit_name']
-
- u.log.debug('Checking for active status on {}'.format(unit_name))
- assert u.status_get(unit)[0] == "active"
-
- u.log.debug('Running pause action on {}'.format(unit_name))
- action_id = self._run_action(unit_name, "pause")
- u.log.debug('Waiting on action {}'.format(action_id))
- assert self._wait_on_action(action_id), "Pause action failed."
- u.log.debug('Checking for maintenance status on {}'.format(unit_name))
- assert u.status_get(unit)[0] == "maintenance"
-
- u.log.debug('Running resume action on {}'.format(unit_name))
- action_id = self._run_action(unit_name, "resume")
- u.log.debug('Waiting on action {}'.format(action_id))
- assert self._wait_on_action(action_id), "Resume action failed."
- u.log.debug('Checking for active status on {}'.format(unit_name))
- assert u.status_get(unit)[0] == "active"
- u.log.debug('OK')
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/__init__.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/__init__.py
deleted file mode 100644
index 4886788..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/__init__.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/deployment.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/deployment.py
deleted file mode 100644
index 9c65518..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/deployment.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import os
-import six
-
-
-class AmuletDeployment(object):
- """Amulet deployment.
-
- This class provides generic Amulet deployment and test runner
- methods.
- """
-
- def __init__(self, series=None):
- """Initialize the deployment environment."""
- self.series = None
-
- if series:
- self.series = series
- self.d = amulet.Deployment(series=self.series)
- else:
- self.d = amulet.Deployment()
-
- def _add_services(self, this_service, other_services):
- """Add services.
-
- Add services to the deployment where this_service is the local charm
- that we're testing and other_services are the other services that
- are being used in the local amulet tests.
- """
- if this_service['name'] != os.path.basename(os.getcwd()):
- s = this_service['name']
- msg = "The charm's root directory name needs to be {}".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- if 'units' not in this_service:
- this_service['units'] = 1
-
- self.d.add(this_service['name'], units=this_service['units'],
- constraints=this_service.get('constraints'))
-
- for svc in other_services:
- if 'location' in svc:
- branch_location = svc['location']
- elif self.series:
- branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
- else:
- branch_location = None
-
- if 'units' not in svc:
- svc['units'] = 1
-
- self.d.add(svc['name'], charm=branch_location, units=svc['units'],
- constraints=svc.get('constraints'))
-
- def _add_relations(self, relations):
- """Add all of the relations for the services."""
- for k, v in six.iteritems(relations):
- self.d.relate(k, v)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _deploy(self):
- """Deploy environment and wait for all hooks to finish executing."""
- timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
- try:
- self.d.setup(timeout=timeout)
- self.d.sentry.wait(timeout=timeout)
- except amulet.helpers.TimeoutError:
- amulet.raise_status(
- amulet.FAIL,
- msg="Deployment timed out ({}s)".format(timeout)
- )
- except Exception:
- raise
-
- def run_tests(self):
- """Run all of the methods that are prefixed with 'test_'."""
- for test in dir(self):
- if test.startswith('test_'):
- getattr(self, test)()
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/utils.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/utils.py
deleted file mode 100644
index a39ed4c..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/amulet/utils.py
+++ /dev/null
@@ -1,827 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import json
-import logging
-import os
-import re
-import socket
-import subprocess
-import sys
-import time
-import uuid
-
-import amulet
-import distro_info
-import six
-from six.moves import configparser
-if six.PY3:
- from urllib import parse as urlparse
-else:
- import urlparse
-
-
-class AmuletUtils(object):
- """Amulet utilities.
-
- This class provides common utility functions that are used by Amulet
- tests.
- """
-
- def __init__(self, log_level=logging.ERROR):
- self.log = self.get_logger(level=log_level)
- self.ubuntu_releases = self.get_ubuntu_releases()
-
- def get_logger(self, name="amulet-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def valid_ip(self, ip):
- if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
- return True
- else:
- return False
-
- def valid_url(self, url):
- p = re.compile(
- r'^(?:http|ftp)s?://'
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
- r'localhost|'
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
- r'(?::\d+)?'
- r'(?:/?|[/?]\S+)$',
- re.IGNORECASE)
- if p.match(url):
- return True
- else:
- return False
-
- def get_ubuntu_release_from_sentry(self, sentry_unit):
- """Get Ubuntu release codename from sentry unit.
-
- :param sentry_unit: amulet sentry/service unit pointer
- :returns: list of strings - release codename, failure message
- """
- msg = None
- cmd = 'lsb_release -cs'
- release, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} lsb_release: {}'.format(
- sentry_unit.info['unit_name'], release))
- else:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, release, code))
- if release not in self.ubuntu_releases:
- msg = ("Release ({}) not found in Ubuntu releases "
- "({})".format(release, self.ubuntu_releases))
- return release, msg
-
- def validate_services(self, commands):
- """Validate that lists of commands succeed on service units. Can be
- used to verify system services are running on the corresponding
- service units.
-
- :param commands: dict with sentry keys and arbitrary command list vals
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # /!\ DEPRECATION WARNING (beisner):
- # New and existing tests should be rewritten to use
- # validate_services_by_name() as it is aware of init systems.
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_services_by_name instead of validate_services '
- 'due to init system differences.')
-
- for k, v in six.iteritems(commands):
- for cmd in v:
- output, code = k.run(cmd)
- self.log.debug('{} `{}` returned '
- '{}'.format(k.info['unit_name'],
- cmd, code))
- if code != 0:
- return "command `{}` returned {}".format(cmd, str(code))
- return None
-
- def validate_services_by_name(self, sentry_services):
- """Validate system service status by service name, automatically
- detecting init system based on Ubuntu release codename.
-
- :param sentry_services: dict with sentry keys and svc list values
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # Point at which systemd became a thing
- systemd_switch = self.ubuntu_releases.index('vivid')
-
- for sentry_unit, services_list in six.iteritems(sentry_services):
- # Get lsb_release codename from unit
- release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
- if ret:
- return ret
-
- for service_name in services_list:
- if (self.ubuntu_releases.index(release) >= systemd_switch or
- service_name in ['rabbitmq-server', 'apache2']):
- # init is systemd (or regular sysv)
- cmd = 'sudo service {} status'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0
- elif self.ubuntu_releases.index(release) < systemd_switch:
- # init is upstart
- cmd = 'sudo status {}'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0 and "start/running" in output
-
- self.log.debug('{} `{}` returned '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code))
- if not service_running:
- return u"command `{}` returned {} {}".format(
- cmd, output, str(code))
- return None
-
- def _get_config(self, unit, filename):
- """Get a ConfigParser object for parsing a unit's config file."""
- file_contents = unit.file_contents(filename)
-
- # NOTE(beisner): by default, ConfigParser does not handle options
- # with no value, such as the flags used in the mysql my.cnf file.
- # https://bugs.python.org/issue7005
- config = configparser.ConfigParser(allow_no_value=True)
- config.readfp(io.StringIO(file_contents))
- return config
-
- def validate_config_data(self, sentry_unit, config_file, section,
- expected):
- """Validate config file data.
-
- Verify that the specified section of the config file contains
- the expected option key:value pairs.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('Validating config file data ({} in {} on {})'
- '...'.format(section, config_file,
- sentry_unit.info['unit_name']))
- config = self._get_config(sentry_unit, config_file)
-
- if section != 'DEFAULT' and not config.has_section(section):
- return "section [{}] does not exist".format(section)
-
- for k in expected.keys():
- if not config.has_option(section, k):
- return "section [{}] is missing option {}".format(section, k)
-
- actual = config.get(section, k)
- v = expected[k]
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if actual != v:
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual):
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- return None
-
- def _validate_dict_data(self, expected, actual):
- """Validate dictionary data.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('actual: {}'.format(repr(actual)))
- self.log.debug('expected: {}'.format(repr(expected)))
-
- for k, v in six.iteritems(expected):
- if k in actual:
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if v != actual[k]:
- return "{}:{}".format(k, actual[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual[k]):
- return "{}:{}".format(k, actual[k])
- else:
- return "key '{}' does not exist".format(k)
- return None
-
- def validate_relation_data(self, sentry_unit, relation, expected):
- """Validate actual relation data based on expected relation data."""
- actual = sentry_unit.relation(relation[0], relation[1])
- return self._validate_dict_data(expected, actual)
-
- def _validate_list_data(self, expected, actual):
- """Compare expected list vs actual list data."""
- for e in expected:
- if e not in actual:
- return "expected item {} not found in actual list".format(e)
- return None
-
- def not_null(self, string):
- if string is not None:
- return True
- else:
- return False
-
- def _get_file_mtime(self, sentry_unit, filename):
- """Get last modification time of file."""
- return sentry_unit.file_stat(filename)['mtime']
-
- def _get_dir_mtime(self, sentry_unit, directory):
- """Get last modification time of directory."""
- return sentry_unit.directory_stat(directory)['mtime']
-
- def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
- """Get start time of a process based on the last modification time
- of the /proc/pid directory.
-
- :sentry_unit: The sentry unit to check for the service on
- :service: service name to look for in process table
- :pgrep_full: [Deprecated] Use full command line search mode with pgrep
- :returns: epoch time of service process start
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- if pgrep_full is not None:
- # /!\ DEPRECATION WARNING (beisner):
- # No longer implemented, as pidof is now used instead of pgrep.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
- 'longer implemented re: lp 1474030.')
-
- pid_list = self.get_process_id_list(sentry_unit, service)
- pid = pid_list[0]
- proc_dir = '/proc/{}'.format(pid)
- self.log.debug('Pid for {} on {}: {}'.format(
- service, sentry_unit.info['unit_name'], pid))
-
- return self._get_dir_mtime(sentry_unit, proc_dir)
-
- def service_restarted(self, sentry_unit, service, filename,
- pgrep_full=None, sleep_time=20):
- """Check if service was restarted.
-
- Compare a service's start time vs a file's last modification time
- (such as a config file for that service) to determine if the service
- has been restarted.
- """
- # /!\ DEPRECATION WARNING (beisner):
- # This method is prone to races in that no before-time is known.
- # Use validate_service_config_changed instead.
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_service_config_changed instead of '
- 'service_restarted due to known races.')
-
- time.sleep(sleep_time)
- if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
- self._get_file_mtime(sentry_unit, filename)):
- return True
- else:
- return False
-
- def service_restarted_since(self, sentry_unit, mtime, service,
- pgrep_full=None, sleep_time=20,
- retry_count=30, retry_sleep_time=10):
- """Check if service was been started after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if service found and its start time it newer than mtime,
- False if service is older than mtime or if service was
- not found.
- """
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s service restarted since %s on '
- '%s' % (service, mtime, unit_name))
- time.sleep(sleep_time)
- proc_start_time = None
- tries = 0
- while tries <= retry_count and not proc_start_time:
- try:
- proc_start_time = self._get_proc_start_time(sentry_unit,
- service,
- pgrep_full)
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'OK'.format(tries, service, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, proc may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'failed\n{}'.format(tries, service,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not proc_start_time:
- self.log.warn('No proc start time found, assuming service did '
- 'not start')
- return False
- if proc_start_time >= mtime:
- self.log.debug('Proc start time is newer than provided mtime'
- '(%s >= %s) on %s (OK)' % (proc_start_time,
- mtime, unit_name))
- return True
- else:
- self.log.warn('Proc start time (%s) is older than provided mtime '
- '(%s) on %s, service did not '
- 'restart' % (proc_start_time, mtime, unit_name))
- return False
-
- def config_updated_since(self, sentry_unit, filename, mtime,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check if file was modified after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check the file mtime on
- filename (string): The file to check mtime of
- mtime (float): The epoch time to check against
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if file was modified more recently than mtime, False if
- file was modified before mtime, or if file not found.
- """
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s updated since %s on '
- '%s' % (filename, mtime, unit_name))
- time.sleep(sleep_time)
- file_mtime = None
- tries = 0
- while tries <= retry_count and not file_mtime:
- try:
- file_mtime = self._get_file_mtime(sentry_unit, filename)
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'OK'.format(tries, filename, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, file may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'failed\n{}'.format(tries, filename,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not file_mtime:
- self.log.warn('Could not determine file mtime, assuming '
- 'file does not exist')
- return False
-
- if file_mtime >= mtime:
- self.log.debug('File mtime is newer than provided mtime '
- '(%s >= %s) on %s (OK)' % (file_mtime,
- mtime, unit_name))
- return True
- else:
- self.log.warn('File mtime is older than provided mtime'
- '(%s < on %s) on %s' % (file_mtime,
- mtime, unit_name))
- return False
-
- def validate_service_config_changed(self, sentry_unit, mtime, service,
- filename, pgrep_full=None,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check service and file were updated after mtime
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- filename (string): The file to check mtime of
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep in seconds to pass to test helpers
- retry_count (int): If service is not found, how many times to retry
- retry_sleep_time (int): Time in seconds to wait between retries
-
- Typical Usage:
- u = OpenStackAmuletUtils(ERROR)
- ...
- mtime = u.get_sentry_time(self.cinder_sentry)
- self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
- if not u.validate_service_config_changed(self.cinder_sentry,
- mtime,
- 'cinder-api',
- '/etc/cinder/cinder.conf')
- amulet.raise_status(amulet.FAIL, msg='update failed')
- Returns:
- bool: True if both service and file where updated/restarted after
- mtime, False if service is older than mtime or if service was
- not found or if filename was modified before mtime.
- """
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- service_restart = self.service_restarted_since(
- sentry_unit, mtime,
- service,
- pgrep_full=pgrep_full,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- config_update = self.config_updated_since(
- sentry_unit,
- filename,
- mtime,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- return service_restart and config_update
-
- def get_sentry_time(self, sentry_unit):
- """Return current epoch time on a sentry"""
- cmd = "date +'%s'"
- return float(sentry_unit.run(cmd)[0])
-
- def relation_error(self, name, data):
- return 'unexpected relation data in {} - {}'.format(name, data)
-
- def endpoint_error(self, name, data):
- return 'unexpected endpoint data in {} - {}'.format(name, data)
-
- def get_ubuntu_releases(self):
- """Return a list of all Ubuntu releases in order of release."""
- _d = distro_info.UbuntuDistroInfo()
- _release_list = _d.all
- return _release_list
-
- def file_to_url(self, file_rel_path):
- """Convert a relative file path to a file URL."""
- _abs_path = os.path.abspath(file_rel_path)
- return urlparse.urlparse(_abs_path, scheme='file').geturl()
-
- def check_commands_on_units(self, commands, sentry_units):
- """Check that all commands in a list exit zero on all
- sentry units in a list.
-
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- self.log.debug('Checking exit codes for {} commands on {} '
- 'sentry units...'.format(len(commands),
- len(sentry_units)))
- for sentry_unit in sentry_units:
- for cmd in commands:
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- return ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- return None
-
- def get_process_id_list(self, sentry_unit, process_name,
- expect_success=True):
- """Get a list of process ID(s) from a single sentry juju unit
- for a single process name.
-
- :param sentry_unit: Amulet sentry instance (juju unit)
- :param process_name: Process name
- :param expect_success: If False, expect the PID to be missing,
- raise if it is present.
- :returns: List of process IDs
- """
- cmd = 'pidof -x {}'.format(process_name)
- if not expect_success:
- cmd += " || exit 0 && exit 1"
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output).split()
-
- def get_unit_process_ids(self, unit_processes, expect_success=True):
- """Construct a dict containing unit sentries, process names, and
- process IDs.
-
- :param unit_processes: A dictionary of Amulet sentry instance
- to list of process names.
- :param expect_success: if False expect the processes to not be
- running, raise if they are.
- :returns: Dictionary of Amulet sentry instance to dictionary
- of process names to PIDs.
- """
- pid_dict = {}
- for sentry_unit, process_list in six.iteritems(unit_processes):
- pid_dict[sentry_unit] = {}
- for process in process_list:
- pids = self.get_process_id_list(
- sentry_unit, process, expect_success=expect_success)
- pid_dict[sentry_unit].update({process: pids})
- return pid_dict
-
- def validate_unit_process_ids(self, expected, actual):
- """Validate process id quantities for services on units."""
- self.log.debug('Checking units for running processes...')
- self.log.debug('Expected PIDs: {}'.format(expected))
- self.log.debug('Actual PIDs: {}'.format(actual))
-
- if len(actual) != len(expected):
- return ('Unit count mismatch. expected, actual: {}, '
- '{} '.format(len(expected), len(actual)))
-
- for (e_sentry, e_proc_names) in six.iteritems(expected):
- e_sentry_name = e_sentry.info['unit_name']
- if e_sentry in actual.keys():
- a_proc_names = actual[e_sentry]
- else:
- return ('Expected sentry ({}) not found in actual dict data.'
- '{}'.format(e_sentry_name, e_sentry))
-
- if len(e_proc_names.keys()) != len(a_proc_names.keys()):
- return ('Process name count mismatch. expected, actual: {}, '
- '{}'.format(len(expected), len(actual)))
-
- for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
- zip(e_proc_names.items(), a_proc_names.items()):
- if e_proc_name != a_proc_name:
- return ('Process name mismatch. expected, actual: {}, '
- '{}'.format(e_proc_name, a_proc_name))
-
- a_pids_length = len(a_pids)
- fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
- '{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids_length,
- a_pids))
-
- # If expected is a list, ensure at least one PID quantity match
- if isinstance(e_pids, list) and \
- a_pids_length not in e_pids:
- return fail_msg
- # If expected is not bool and not list,
- # ensure PID quantities match
- elif not isinstance(e_pids, bool) and \
- not isinstance(e_pids, list) and \
- a_pids_length != e_pids:
- return fail_msg
- # If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is True and a_pids_length < 1:
- return fail_msg
- # If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is False and a_pids_length != 0:
- return fail_msg
- else:
- self.log.debug('PID check OK: {} {} {}: '
- '{}'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids))
- return None
-
- def validate_list_of_identical_dicts(self, list_of_dicts):
- """Check that all dicts within a list are identical."""
- hashes = []
- for _dict in list_of_dicts:
- hashes.append(hash(frozenset(_dict.items())))
-
- self.log.debug('Hashes: {}'.format(hashes))
- if len(set(hashes)) == 1:
- self.log.debug('Dicts within list are identical')
- else:
- return 'Dicts within list are not identical'
-
- return None
-
- def validate_sectionless_conf(self, file_contents, expected):
- """A crude conf parser. Useful to inspect configuration files which
- do not have section headers (as would be necessary in order to use
- the configparser). Such as openstack-dashboard or rabbitmq confs."""
- for line in file_contents.split('\n'):
- if '=' in line:
- args = line.split('=')
- if len(args) <= 1:
- continue
- key = args[0].strip()
- value = args[1].strip()
- if key in expected.keys():
- if expected[key] != value:
- msg = ('Config mismatch. Expected, actual: {}, '
- '{}'.format(expected[key], value))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def get_unit_hostnames(self, units):
- """Return a dict of juju unit names to hostnames."""
- host_names = {}
- for unit in units:
- host_names[unit.info['unit_name']] = \
- str(unit.file_contents('/etc/hostname').strip())
- self.log.debug('Unit host names: {}'.format(host_names))
- return host_names
-
- def run_cmd_unit(self, sentry_unit, cmd):
- """Run a command on a unit, return the output and exit code."""
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` command returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- msg = ('{} `{}` command returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output), code
-
- def file_exists_on_unit(self, sentry_unit, file_name):
- """Check if a file exists on a unit."""
- try:
- sentry_unit.file_stat(file_name)
- return True
- except IOError:
- return False
- except Exception as e:
- msg = 'Error checking file {}: {}'.format(file_name, e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def file_contents_safe(self, sentry_unit, file_name,
- max_wait=60, fatal=False):
- """Get file contents from a sentry unit. Wrap amulet file_contents
- with retry logic to address races where a file checks as existing,
- but no longer exists by the time file_contents is called.
- Return None if file not found. Optionally raise if fatal is True."""
- unit_name = sentry_unit.info['unit_name']
- file_contents = False
- tries = 0
- while not file_contents and tries < (max_wait / 4):
- try:
- file_contents = sentry_unit.file_contents(file_name)
- except IOError:
- self.log.debug('Attempt {} to open file {} from {} '
- 'failed'.format(tries, file_name,
- unit_name))
- time.sleep(4)
- tries += 1
-
- if file_contents:
- return file_contents
- elif not fatal:
- return None
- elif fatal:
- msg = 'Failed to get file contents from unit.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def port_knock_tcp(self, host="localhost", port=22, timeout=15):
- """Open a TCP socket to check for a listening sevice on a host.
-
- :param host: host name or IP address, default to localhost
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :returns: True if successful, False if connect failed
- """
-
- # Resolve host name if possible
- try:
- connect_host = socket.gethostbyname(host)
- host_human = "{} ({})".format(connect_host, host)
- except socket.error as e:
- self.log.warn('Unable to resolve address: '
- '{} ({}) Trying anyway!'.format(host, e))
- connect_host = host
- host_human = connect_host
-
- # Attempt socket connection
- try:
- knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- knock.settimeout(timeout)
- knock.connect((connect_host, port))
- knock.close()
- self.log.debug('Socket connect OK for host '
- '{} on port {}.'.format(host_human, port))
- return True
- except socket.error as e:
- self.log.debug('Socket connect FAIL for'
- ' {} port {} ({})'.format(host_human, port, e))
- return False
-
- def port_knock_units(self, sentry_units, port=22,
- timeout=15, expect_success=True):
- """Open a TCP socket to check for a listening sevice on each
- listed juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :expect_success: True by default, set False to invert logic
- :returns: None if successful, Failure message otherwise
- """
- for unit in sentry_units:
- host = unit.info['public-address']
- connected = self.port_knock_tcp(host, port, timeout)
- if not connected and expect_success:
- return 'Socket connect failed.'
- elif connected and not expect_success:
- return 'Socket connected unexpectedly.'
-
- def get_uuid_epoch_stamp(self):
- """Returns a stamp string based on uuid4 and epoch time. Useful in
- generating test messages which need to be unique-ish."""
- return '[{}-{}]'.format(uuid.uuid4(), time.time())
-
-# amulet juju action helpers:
- def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output,
- params=None):
- """Run the named action on a given unit sentry.
-
- params a dict of parameters to use
- _check_output parameter is used for dependency injection.
-
- @return action_id.
- """
- unit_id = unit_sentry.info["unit_name"]
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- if params is not None:
- for key, value in params.iteritems():
- command.append("{}={}".format(key, value))
- self.log.info("Running command: %s\n" % " ".join(command))
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- action_id = data[u'Action queued with id']
- return action_id
-
- def wait_on_action(self, action_id, _check_output=subprocess.check_output):
- """Wait for a given action, returning if it completed or not.
-
- _check_output parameter is used for dependency injection.
- """
- command = ["juju", "action", "fetch", "--format=json", "--wait=0",
- action_id]
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- return data.get(u"status") == "completed"
-
- def status_get(self, unit):
- """Return the current service status of this unit."""
- raw_status, return_code = unit.run(
- "status-get --format=json --include-data")
- if return_code != 0:
- return ("unknown", "")
- status = json.loads(raw_status)
- return (status["status"], status["message"])
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 6ce91db..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,295 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the ~openstack-charmers
- base_charms = {
- 'mysql': ['precise', 'trusty'],
- 'mongodb': ['precise', 'trusty'],
- 'nrpe': ['precise', 'trusty', 'wily', 'xenial'],
- }
-
- for svc in other_services:
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if svc['name'] in base_charms:
- # NOTE: not all charms have support for all series we
- # want/need to test against, so fix to most recent
- # that each base charm supports
- target_series = self.series
- if self.series not in base_charms[svc['name']]:
- target_series = base_charms[svc['name']][-1]
- svc['location'] = 'cs:{}/{}'.format(target_series,
- svc['name'])
- elif self.stable:
- svc['location'] = 'cs:{}/{}'.format(self.series,
- svc['name'])
- else:
- svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
- self.series,
- svc['name']
- )
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon', 'ceph-proxy']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index 8040b57..0000000
--- a/charms/trusty/ceilometer-agent/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1010 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer-agent/tests/dev-basic-xenial-newton b/charms/trusty/ceilometer-agent/tests/dev-basic-xenial-newton
deleted file mode 100755
index a1732e4..0000000
--- a/charms/trusty/ceilometer-agent/tests/dev-basic-xenial-newton
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on xenial-newton."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='xenial',
- openstack='cloud:xenial-newton',
- source='cloud:xenial-updates/newton')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/dev-basic-yakkety-newton b/charms/trusty/ceilometer-agent/tests/dev-basic-yakkety-newton
deleted file mode 100755
index 1099697..0000000
--- a/charms/trusty/ceilometer-agent/tests/dev-basic-yakkety-newton
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on yakkety-newton."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='yakkety')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/gate-basic-precise-icehouse b/charms/trusty/ceilometer-agent/tests/gate-basic-precise-icehouse
deleted file mode 100755
index eedfb05..0000000
--- a/charms/trusty/ceilometer-agent/tests/gate-basic-precise-icehouse
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on precise-icehouse."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='precise',
- openstack='cloud:precise-icehouse',
- source='cloud:precise-updates/icehouse')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-icehouse b/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-icehouse
deleted file mode 100755
index 4dfc5af..0000000
--- a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-icehouse
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on trusty-icehouse."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='trusty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-kilo b/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-kilo
deleted file mode 100755
index 61d632c..0000000
--- a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-kilo
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on trusty-kilo."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='trusty',
- openstack='cloud:trusty-kilo',
- source='cloud:trusty-updates/kilo')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-liberty b/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-liberty
deleted file mode 100755
index 0a84608..0000000
--- a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-liberty
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on trusty-liberty."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='trusty',
- openstack='cloud:trusty-liberty',
- source='cloud:trusty-updates/liberty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-mitaka b/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-mitaka
deleted file mode 100755
index c1e96f5..0000000
--- a/charms/trusty/ceilometer-agent/tests/gate-basic-trusty-mitaka
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on trusty-mitaka."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='trusty',
- openstack='cloud:trusty-mitaka',
- source='cloud:trusty-updates/mitaka')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/gate-basic-xenial-mitaka b/charms/trusty/ceilometer-agent/tests/gate-basic-xenial-mitaka
deleted file mode 100755
index fed35f1..0000000
--- a/charms/trusty/ceilometer-agent/tests/gate-basic-xenial-mitaka
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Amulet tests on a basic ceilometer-agent deployment on xenial-mitaka."""
-
-from basic_deployment import CeiloAgentBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeiloAgentBasicDeployment(series='xenial')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer-agent/tests/tests.yaml b/charms/trusty/ceilometer-agent/tests/tests.yaml
deleted file mode 100644
index e3185c6..0000000
--- a/charms/trusty/ceilometer-agent/tests/tests.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Bootstrap the model if necessary.
-bootstrap: True
-# Re-use bootstrap node instead of destroying/re-bootstrapping.
-reset: True
-# Use tox/requirements to drive the venv instead of bundletester's venv feature.
-virtualenv: False
-# Leave makefile empty, otherwise unit/lint tests will rerun ahead of amulet.
-makefile: []
-# Do not specify juju PPA sources. Juju is presumed to be pre-installed
-# and configured in all test runner environments.
-#sources:
-# Do not specify or rely on system packages.
-#packages:
-# Do not specify python packages here. Use test-requirements.txt
-# and tox instead. ie. The venv is constructed before bundletester
-# is invoked.
-#python-packages:
diff --git a/charms/trusty/ceilometer-agent/tox.ini b/charms/trusty/ceilometer-agent/tox.ini
deleted file mode 100644
index b73c644..0000000
--- a/charms/trusty/ceilometer-agent/tox.ini
+++ /dev/null
@@ -1,75 +0,0 @@
-[tox]
-envlist = pep8,py27
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
- PYTHONHASHSEED=0
- AMULET_SETUP_TIMEOUT=2700
-passenv = HOME TERM AMULET_HTTP_PROXY AMULET_OS_VIP
-install_command =
- pip install --allow-unverified python-apt {opts} {packages}
-commands = ostestr {posargs}
-
-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-
-[testenv:pep8]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} --exclude */charmhelpers hooks unit_tests tests actions
- charm-proof
-
-[testenv:venv]
-commands = {posargs}
-
-[testenv:func27-noop]
-# DRY RUN - For Debug
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy
-
-[testenv:func27]
-# Charm Functional Test
-# Run all gate tests which are +x (expected to always pass)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy
-
-[testenv:func27-smoke]
-# Charm Functional Test
-# Run a specific test as an Amulet smoke test (expected to always pass)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
-
-[testenv:func27-dfs]
-# Charm Functional Test
-# Run all deploy-from-source tests which are +x (may not always pass!)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
-
-[testenv:func27-dev]
-# Charm Functional Test
-# Run all development test targets which are +x (may not always pass!)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands =
- bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dev-*" --no-destroy
-
-[flake8]
-ignore = E402,E226
-exclude = hooks/charmhelpers
diff --git a/charms/trusty/ceilometer-agent/unit_tests/__init__.py b/charms/trusty/ceilometer-agent/unit_tests/__init__.py
deleted file mode 100644
index 6c9ae40..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-sys.path.append('actions')
-sys.path.append('hooks')
diff --git a/charms/trusty/ceilometer-agent/unit_tests/test_actions.py b/charms/trusty/ceilometer-agent/unit_tests/test_actions.py
deleted file mode 100644
index 6f37c83..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/test_actions.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-from mock import patch
-
-from test_utils import CharmTestCase
-
-with patch('ceilometer_utils.register_configs') as configs:
- configs.return_value = 'test-config'
- import actions
-
-
-class PauseTestCase(CharmTestCase):
-
- def setUp(self):
- super(PauseTestCase, self).setUp(
- actions, ["pause_unit_helper"])
-
- def test_pauses_services(self):
- actions.pause([])
- self.pause_unit_helper.assert_called_once_with('test-config')
-
-
-class ResumeTestCase(CharmTestCase):
-
- def setUp(self):
- super(ResumeTestCase, self).setUp(
- actions, ["resume_unit_helper"])
-
- def test_pauses_services(self):
- actions.resume([])
- self.resume_unit_helper.assert_called_once_with('test-config')
-
-
-class MainTestCase(CharmTestCase):
-
- def setUp(self):
- super(MainTestCase, self).setUp(actions, ["action_fail"])
-
- def test_invokes_action(self):
- dummy_calls = []
-
- def dummy_action(args):
- dummy_calls.append(True)
-
- with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
- actions.main(["foo"])
- self.assertEqual(dummy_calls, [True])
-
- def test_unknown_action(self):
- """Unknown actions aren't a traceback."""
- exit_string = actions.main(["foo"])
- self.assertEqual("Action foo undefined", exit_string)
-
- def test_failing_action(self):
- """Actions which traceback trigger action_fail() calls."""
- dummy_calls = []
-
- self.action_fail.side_effect = dummy_calls.append
-
- def dummy_action(args):
- raise ValueError("uh oh")
-
- with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
- actions.main(["foo"])
- self.assertEqual(dummy_calls, ["uh oh"])
diff --git a/charms/trusty/ceilometer-agent/unit_tests/test_actions_openstack_upgrade.py b/charms/trusty/ceilometer-agent/unit_tests/test_actions_openstack_upgrade.py
deleted file mode 100644
index 7628232..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/test_actions_openstack_upgrade.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from mock import patch
-import os
-
-os.environ['JUJU_UNIT_NAME'] = 'ceilometer'
-
-with patch('ceilometer_utils.register_configs') as register_configs:
- import openstack_upgrade
-
-from test_utils import (
- CharmTestCase
-)
-
-TO_PATCH = [
- 'config_changed',
- 'do_openstack_upgrade',
-]
-
-
-class TestCinderUpgradeActions(CharmTestCase):
-
- def setUp(self):
- super(TestCinderUpgradeActions, self).setUp(openstack_upgrade,
- TO_PATCH)
-
- @patch('charmhelpers.contrib.openstack.utils.juju_log')
- @patch('charmhelpers.contrib.openstack.utils.config')
- @patch('charmhelpers.contrib.openstack.utils.action_set')
- @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
- @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
- def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
- action_set, config, log):
- git_requested.return_value = False
- upgrade_avail.return_value = True
- config.return_value = True
-
- openstack_upgrade.openstack_upgrade()
-
- self.assertTrue(self.do_openstack_upgrade.called)
- self.assertTrue(self.config_changed.called)
-
- @patch('charmhelpers.contrib.openstack.utils.juju_log')
- @patch('charmhelpers.contrib.openstack.utils.config')
- @patch('charmhelpers.contrib.openstack.utils.action_set')
- @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
- @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
- def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
- action_set, config, log):
- git_requested.return_value = False
- upgrade_avail.return_value = True
- config.return_value = False
-
- openstack_upgrade.openstack_upgrade()
-
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertFalse(self.config_changed.called)
diff --git a/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_contexts.py b/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_contexts.py
deleted file mode 100644
index fdbbf15..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_contexts.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import ceilometer_contexts as contexts
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'relation_get',
- 'relation_ids',
- 'related_units',
-]
-
-
-class CeilometerContextsTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerContextsTest, self).setUp(contexts, TO_PATCH)
- self.relation_get.side_effect = self.test_relation.get
-
- def tearDown(self):
- super(CeilometerContextsTest, self).tearDown()
-
- def test_ceilometer_service_context(self):
- self.relation_ids.return_value = ['ceilometer-service:0']
- self.related_units.return_value = ['ceilometer/0']
- data = {
- 'debug': True,
- 'verbose': False,
- 'rabbitmq_host': 'foo',
- 'rabbitmq_user': 'bar',
- 'rabbitmq_password': 'baz',
- 'rabbitmq_virtual_host': 'openstack',
- 'rabbit_ssl_ca': None,
- 'rabbit_ssl_port': None,
- 'auth_protocol': 'http',
- 'auth_host': 'keystone',
- 'auth_port': '80',
- 'admin_tenant_name': 'admin',
- 'admin_user': 'admin',
- 'admin_password': 'password',
- 'metering_secret': 'secret'
- }
- self.test_relation.set(data)
- self.assertEquals(contexts.CeilometerServiceContext()(), data)
-
- def test_ceilometer_service_context_not_related(self):
- self.relation_ids.return_value = []
- self.assertEquals(contexts.CeilometerServiceContext()(), {})
diff --git a/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_hooks.py b/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_hooks.py
deleted file mode 100644
index 67a933b..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_hooks.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-from mock import patch, MagicMock
-
-import ceilometer_utils
-# Patch out register_configs for import of hooks
-_register_configs = ceilometer_utils.register_configs
-ceilometer_utils.register_configs = MagicMock()
-
-import ceilometer_hooks as hooks
-
-# Renable old function
-ceilometer_utils.register_configs = _register_configs
-
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'configure_installation_source',
- 'apt_install',
- 'apt_update',
- 'config',
- 'filter_installed_packages',
- 'CONFIGS',
- 'relation_set',
- 'openstack_upgrade_available',
- 'do_openstack_upgrade',
- 'update_nrpe_config',
- 'is_relation_made',
-]
-
-
-class CeilometerHooksTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerHooksTest, self).setUp(hooks, TO_PATCH)
- self.config.side_effect = self.test_config.get
-
- @patch('charmhelpers.core.hookenv.config')
- def test_configure_source(self, mock_config):
- self.test_config.set('openstack-origin', 'cloud:precise-havana')
- hooks.hooks.execute(['hooks/install'])
- self.configure_installation_source.\
- assert_called_with('cloud:precise-havana')
-
- @patch('charmhelpers.core.hookenv.config')
- def test_install_hook(self, mock_config):
- self.filter_installed_packages.return_value = \
- hooks.CEILOMETER_AGENT_PACKAGES
- hooks.hooks.execute(['hooks/install'])
- self.assertTrue(self.configure_installation_source.called)
- self.apt_update.assert_called_with(fatal=True)
- self.apt_install.assert_called_with(hooks.CEILOMETER_AGENT_PACKAGES,
- fatal=True)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_ceilometer_changed(self, mock_config):
- hooks.hooks.execute(['hooks/ceilometer-service-relation-changed'])
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(self.update_nrpe_config.called)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_ceilometer_changed_no_nrpe(self, mock_config):
- self.is_relation_made.return_value = False
-
- hooks.hooks.execute(['hooks/ceilometer-service-relation-changed'])
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertFalse(self.update_nrpe_config.called)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_nova_ceilometer_joined(self, mock_config):
- hooks.hooks.execute(['hooks/nova-ceilometer-relation-joined'])
- self.relation_set.assert_called_with(
- subordinate_configuration=json.dumps(
- ceilometer_utils.NOVA_SETTINGS))
-
- @patch('charmhelpers.core.hookenv.config')
- def test_config_changed_no_upgrade(self, mock_config):
- self.openstack_upgrade_available.return_value = False
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(self.update_nrpe_config.called)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_config_changed_upgrade(self, mock_config):
- self.openstack_upgrade_available.return_value = True
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertTrue(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(self.update_nrpe_config.called)
-
- def test_config_changed_with_openstack_upgrade_action(self):
- self.openstack_upgrade_available.return_value = True
- self.test_config.set('action-managed-upgrade', True)
-
- hooks.hooks.execute(['hooks/config-changed'])
-
- self.assertFalse(self.do_openstack_upgrade.called)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_config_changed_no_nrpe(self, mock_config):
- self.openstack_upgrade_available.return_value = False
- self.is_relation_made.return_value = False
-
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertFalse(self.update_nrpe_config.called)
diff --git a/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_utils.py b/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_utils.py
deleted file mode 100644
index 01ba074..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/test_ceilometer_utils.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from mock import call, MagicMock, patch
-
-import ceilometer_utils as utils
-
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'get_os_codename_package',
- 'templating',
- 'CeilometerServiceContext',
- 'config',
- 'get_os_codename_install_source',
- 'configure_installation_source',
- 'apt_install',
- 'apt_update',
- 'apt_upgrade',
- 'log'
-]
-
-
-class CeilometerUtilsTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerUtilsTest, self).setUp(utils, TO_PATCH)
-
- def tearDown(self):
- super(CeilometerUtilsTest, self).tearDown()
-
- def test_register_configs(self):
- configs = utils.register_configs()
- calls = []
- for conf in utils.CONFIG_FILES:
- calls.append(call(conf,
- utils.CONFIG_FILES[conf]['hook_contexts']))
- configs.register.assert_has_calls(calls, any_order=True)
-
- def test_restart_map(self):
- restart_map = utils.restart_map()
- self.assertEquals(restart_map,
- {'/etc/ceilometer/ceilometer.conf': [
- 'ceilometer-agent-compute']})
-
- def test_do_openstack_upgrade(self):
- self.config.side_effect = self.test_config.get
- self.test_config.set('openstack-origin', 'cloud:precise-havana')
- self.get_os_codename_install_source.return_value = 'havana'
- configs = MagicMock()
- utils.do_openstack_upgrade(configs)
- configs.set_release.assert_called_with(openstack_release='havana')
- self.assertTrue(self.log.called)
- self.apt_update.assert_called_with(fatal=True)
- dpkg_opts = [
- '--option', 'Dpkg::Options::=--force-confnew',
- '--option', 'Dpkg::Options::=--force-confdef',
- ]
- self.apt_install.assert_called_with(
- packages=utils.CEILOMETER_AGENT_PACKAGES,
- options=dpkg_opts, fatal=True
- )
- self.configure_installation_source.assert_called_with(
- 'cloud:precise-havana'
- )
-
- def test_assess_status(self):
- with patch.object(utils, 'assess_status_func') as asf:
- callee = MagicMock()
- asf.return_value = callee
- utils.assess_status('test-config')
- asf.assert_called_once_with('test-config')
- callee.assert_called_once_with()
-
- @patch.object(utils, 'REQUIRED_INTERFACES')
- @patch.object(utils, 'services')
- @patch.object(utils, 'make_assess_status_func')
- def test_assess_status_func(self,
- make_assess_status_func,
- services,
- REQUIRED_INTERFACES):
- services.return_value = 's1'
- utils.assess_status_func('test-config')
- # ports=None whilst port checks are disabled.
- make_assess_status_func.assert_called_once_with(
- 'test-config', REQUIRED_INTERFACES, services='s1', ports=None)
-
- def test_pause_unit_helper(self):
- with patch.object(utils, '_pause_resume_helper') as prh:
- utils.pause_unit_helper('random-config')
- prh.assert_called_once_with(utils.pause_unit, 'random-config')
- with patch.object(utils, '_pause_resume_helper') as prh:
- utils.resume_unit_helper('random-config')
- prh.assert_called_once_with(utils.resume_unit, 'random-config')
-
- @patch.object(utils, 'services')
- def test_pause_resume_helper(self, services):
- f = MagicMock()
- services.return_value = 's1'
- with patch.object(utils, 'assess_status_func') as asf:
- asf.return_value = 'assessor'
- utils._pause_resume_helper(f, 'some-config')
- asf.assert_called_once_with('some-config')
- # ports=None whilst port checks are disabled.
- f.assert_called_once_with('assessor', services='s1', ports=None)
diff --git a/charms/trusty/ceilometer-agent/unit_tests/test_utils.py b/charms/trusty/ceilometer-agent/unit_tests/test_utils.py
deleted file mode 100644
index 09d77a6..0000000
--- a/charms/trusty/ceilometer-agent/unit_tests/test_utils.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import unittest
-import os
-import yaml
-import io
-
-from contextlib import contextmanager
-from mock import patch
-
-
-@contextmanager
-def mock_open(filename, contents=None):
- ''' Slightly simpler mock of open to return contents for filename '''
- def mock_file(*args):
- if args[0] == filename:
- return io.StringIO(contents)
- else:
- return open(*args)
- with patch('__builtin__.open', mock_file):
- yield
-
-
-def load_config():
- '''
- Walk backwords from __file__ looking for config.yaml, load and return the
- 'options' section'
- '''
- config = None
- f = __file__
- while config is None:
- d = os.path.dirname(f)
- if os.path.isfile(os.path.join(d, 'config.yaml')):
- config = os.path.join(d, 'config.yaml')
- break
- f = d
-
- if not config:
- logging.error('Could not find config.yaml in any parent directory '
- 'of %s. ' % file)
- raise Exception
-
- return yaml.safe_load(open(config).read())['options']
-
-
-def get_default_config():
- '''
- Load default charm config from config.yaml return as a dict.
- If no default is set in config.yaml, its value is None.
- '''
- default_config = {}
- config = load_config()
- for k, v in config.iteritems():
- if 'default' in v:
- default_config[k] = v['default']
- else:
- default_config[k] = None
- return default_config
-
-
-class CharmTestCase(unittest.TestCase):
-
- def setUp(self, obj, patches):
- super(CharmTestCase, self).setUp()
- self.patches = patches
- self.obj = obj
- self.test_config = TestConfig()
- self.test_relation = TestRelation()
- self.patch_all()
-
- def patch(self, method):
- _m = patch.object(self.obj, method)
- mock = _m.start()
- self.addCleanup(_m.stop)
- return mock
-
- def patch_all(self):
- for method in self.patches:
- setattr(self, method, self.patch(method))
-
-
-class TestConfig(object):
-
- def __init__(self):
- self.config = get_default_config()
-
- def get(self, attr):
- try:
- return self.config[attr]
- except KeyError:
- return None
-
- def get_all(self):
- return self.config
-
- def set(self, attr, value):
- if attr not in self.config:
- raise KeyError
- self.config[attr] = value
-
-
-class TestRelation(object):
-
- def __init__(self, relation_data={}):
- self.relation_data = relation_data
-
- def set(self, relation_data):
- self.relation_data = relation_data
-
- def get(self, attr=None, unit=None, rid=None):
- if attr is None:
- return self.relation_data
- elif attr in self.relation_data:
- return self.relation_data[attr]
- return None
diff --git a/charms/trusty/ceilometer-contrail/.bzrignore b/charms/trusty/ceilometer-contrail/.bzrignore
deleted file mode 100644
index ba077a4..0000000
--- a/charms/trusty/ceilometer-contrail/.bzrignore
+++ /dev/null
@@ -1 +0,0 @@
-bin
diff --git a/charms/trusty/ceilometer-contrail/Makefile b/charms/trusty/ceilometer-contrail/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/ceilometer-contrail/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/ceilometer-contrail/README.md b/charms/trusty/ceilometer-contrail/README.md
deleted file mode 100644
index b42e316..0000000
--- a/charms/trusty/ceilometer-contrail/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This subordinate charm provides the Ceilometer plugin component.
-Only OpenStack Icehouse or newer is supported.
-
-Usage
------
-
-Ceilometer and Contrail Analytics are prerequisite services to deploy.
-
-Once ready, deploy and relate as follows:
-
- juju deploy ceilometer-contrail
- juju add-relation ceilometer ceilometer-contrail
- juju add-relation ceilometer-contrail contrail-analytics
-
-Install Sources
----------------
-
-The version of Contrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
diff --git a/charms/trusty/ceilometer-contrail/charm-helpers-sync.yaml b/charms/trusty/ceilometer-contrail/charm-helpers-sync.yaml
deleted file mode 100644
index 0af5672..0000000
--- a/charms/trusty/ceilometer-contrail/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
diff --git a/charms/trusty/ceilometer-contrail/config.yaml b/charms/trusty/ceilometer-contrail/config.yaml
deleted file mode 100644
index aae1147..0000000
--- a/charms/trusty/ceilometer-contrail/config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-options:
- install-sources:
- type: string
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
diff --git a/charms/trusty/ceilometer-contrail/copyright b/charms/trusty/ceilometer-contrail/copyright
deleted file mode 100644
index 1b957cd..0000000
--- a/charms/trusty/ceilometer-contrail/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2016, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer-contrail/hooks/ceilometer-plugin-relation-joined b/charms/trusty/ceilometer-contrail/hooks/ceilometer-plugin-relation-joined
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/ceilometer-plugin-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_hooks.py b/charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_hooks.py
deleted file mode 100755
index 5244a72..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_hooks.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python
-
-from collections import OrderedDict
-from socket import gethostbyname
-import sys
-
-import yaml
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- log,
- related_units,
- relation_get,
- relation_ids,
- relation_set
-)
-
-from charmhelpers.fetch import (
- apt_install,
- apt_upgrade,
- configure_sources
-)
-
-from ceilometer_contrail_utils import contrail_analytics_api_units, units
-
-PACKAGES = [ "ceilometer-plugin-contrail" ]
-
-hooks = Hooks()
-config = config()
-
-@hooks.hook("ceilometer-plugin-relation-joined")
-def ceilometer_plugin_joined():
- if contrail_analytics_api_units():
- configure_plugin()
-
-@hooks.hook("config-changed")
-def config_changed():
- pass
-
-def configure_plugin():
- # create plugin config
- api_ip, api_port = [
- (vip if vip else gethostbyname(relation_get("private-address", unit, rid)),
- port)
- for rid in relation_ids("contrail-analytics-api")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ][0]
- meter_sources = [
- OrderedDict([("name", "contrail_source"),
- ("interval", 600),
- ("meters", ["ip.floating.receive.packets",
- "ip.floating.transmit.packets",
- "ip.floating.receive.bytes",
- "ip.floating.transmit.bytes"]),
- ("resources", ["contrail://{}:{}".format(api_ip, api_port)]),
- ("sinks", ["contrail_sink"])])]
- meter_sinks = [
- OrderedDict([("name", "contrail_sink"),
- ("publishers", ["rpc://"]),
- ("transformers", [])])]
- settings = { "meter-sources": yaml.dump(meter_sources),
- "meter-sinks": yaml.dump(meter_sinks) }
- for rid in relation_ids("ceilometer-plugin"):
- relation_set(relation_id=rid, relation_settings=settings)
-
-@hooks.hook("contrail-analytics-api-relation-changed")
-def contrail_analytics_api_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- if units("ceilometer-plugin"):
- configure_plugin()
-
-@hooks.hook("contrail-analytics-api-relation-departed")
-@hooks.hook("contrail-analytics-api-relation-broken")
-def contrail_analytics_api_departed():
- if not units("contrail-analytics-api"):
- remove_plugin()
-
-@hooks.hook()
-def install():
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- apt_install(PACKAGES, fatal=True)
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
-
-def remove_plugin():
- settings = { "meter-sources": None, "meter-sinks": None }
- for rid in relation_ids("ceilometer-plugin"):
- relation_set(relation_id=rid, relation_settings=settings)
-
-@hooks.hook("upgrade-charm")
-def upgrade_charm():
- pass
-
-if __name__ == "__main__":
- main()
diff --git a/charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_utils.py b/charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_utils.py
deleted file mode 100644
index 6bab06e..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/ceilometer_contrail_utils.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from collections import OrderedDict
-
-import yaml
-
-from charmhelpers.core.hookenv import (
- related_units,
- relation_get,
- relation_ids
-)
-
-def ordereddict_representer(dumper, data):
- return dumper.represent_mapping("tag:yaml.org,2002:map", data.items())
-
-yaml.add_representer(OrderedDict, ordereddict_representer)
-
-def contrail_analytics_api_units():
- """Return a list of contrail analytics api units"""
- return [ unit for rid in relation_ids("contrail-analytics-api")
- for unit in related_units(rid)
- if relation_get("port", unit, rid) ]
-
-def units(relation):
- """Return a list of units for the specified relation"""
- return [ unit for rid in relation_ids(relation)
- for unit in related_units(rid) ]
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/__init__.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/__init__.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/decorators.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/files.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/fstab.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hookenv.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index 0132129..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1009 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-@cached
-def storage_list(storage_name=None):
- """List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
- if storage_name:
- _args.append(storage_name)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except OSError as e:
- import errno
- if e.errno == errno.ENOENT:
- # storage-list does not exist
- return []
- raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- @wraps(f)
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The <class> and <id> provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The <class> and <id> provided must match a payload that has been previously
- registered with juju using payload-register. The <status> must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- <name> must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation of extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).strip()
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/host.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/host.py
deleted file mode 100644
index 64b2df5..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,714 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = True
- if service_running(service_name):
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_running(service_name)
- if not started:
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name):
- """Determine whether a system service is running"""
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- if os.path.exists(_UPSTART_CONF.format(service_name)):
- try:
- output = subprocess.check_output(
- ['status', service_name],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- # This works for upstart scripts where the 'service' command
- # returns a consistent string to represent running 'start/running'
- if "start/running" in output:
- return True
- elif os.path.exists(_INIT_D_CONF.format(service_name)):
- # Check System V scripts init script return codes
- return service('status', service_name)
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
- primary_group=None, secondary_groups=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
-
-
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result of lambda_f()
- """
- if restart_functions is None:
- restart_functions = {}
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for service_name in services_list:
- if service_name in restart_functions:
- restart_functions[service_name](service_name)
- else:
- for action in actions:
- service(action, service_name)
- return r
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also Chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # Classic, not KiB.
- raise NotImplementedError()
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hugepage.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index a783ad9..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/kernel.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/base.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 2423704..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/strutils.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/sysctl.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/templating.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index d2d8eaf..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/unitdata.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py.orig b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py.orig
deleted file mode 100644
index ad485ec..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/__init__.py.orig
+++ /dev/null
@@ -1,472 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
- # Newton
- 'newton': 'xenial-updates/newton',
- 'xenial-newton': 'xenial-updates/newton',
- 'xenial-newton/updates': 'xenial-updates/newton',
- 'xenial-updates/newton': 'xenial-updates/newton',
- 'newton/proposed': 'xenial-proposed/newton',
- 'xenial-newton/proposed': 'xenial-proposed/newton',
- 'xenial-proposed/newton': 'xenial-proposed/newton',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except NotImplementedError:
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index b8e0943..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'wb') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index cafd27f..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-from charmhelpers.core.host import mkdir
-
-
-if filter_installed_packages(['bzr']) != []:
- apt_install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
- else:
- return True
-
- def branch(self, source, dest):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if os.path.exists(dest):
- check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
- else:
- check_call(['bzr', 'branch', source, dest])
-
- def install(self, source, dest=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
-
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index 65ed531..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call, CalledProcessError
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-
-if filter_installed_packages(['git']) != []:
- apt_install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
- else:
- return True
-
- def clone(self, source, dest, branch="master", depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
- else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
- if depth:
- cmd.extend(['--depth', depth])
- check_call(cmd)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- try:
- self.clone(source, dest_dir, branch, depth)
- except CalledProcessError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer-contrail/hooks/config-changed b/charms/trusty/ceilometer-contrail/hooks/config-changed
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-broken b/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-broken
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-changed b/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-changed
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-departed b/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-departed
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/contrail-analytics-api-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/install b/charms/trusty/ceilometer-contrail/hooks/install
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/start b/charms/trusty/ceilometer-contrail/hooks/start
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/stop b/charms/trusty/ceilometer-contrail/hooks/stop
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/hooks/upgrade-charm b/charms/trusty/ceilometer-contrail/hooks/upgrade-charm
deleted file mode 120000
index 39b92e2..0000000
--- a/charms/trusty/ceilometer-contrail/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer-contrail/icon.svg b/charms/trusty/ceilometer-contrail/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/ceilometer-contrail/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/ceilometer-contrail/metadata.yaml b/charms/trusty/ceilometer-contrail/metadata.yaml
deleted file mode 100644
index 15cd654..0000000
--- a/charms/trusty/ceilometer-contrail/metadata.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: ceilometer-contrail
-summary: OpenStack Telemetry Contrail Plugin
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- Ceilometer project aims to become the infrastructure to collect measurements
- within OpenStack so that no two agents would need to be written to collect
- the same data. It's primary targets are monitoring and metering, but the
- framework should be easily expandable to collect for other needs. To that
- effect, Ceilometer should be able to share collected data with a variety
- of consumers.
- .
- This charm provides the Ceilometer Contrail plugin.
-categories:
- - openstack
-subordinate: true
-provides:
- ceilometer-plugin:
- interface: ceilometer-plugin
- scope: container
-requires:
- container:
- interface: juju-info
- scope: container
- contrail-analytics-api:
- interface: contrail-analytics-api
diff --git a/charms/trusty/ceilometer/.bzr/README b/charms/trusty/ceilometer/.bzr/README
deleted file mode 100644
index f82dc1c..0000000
--- a/charms/trusty/ceilometer/.bzr/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a Bazaar control directory.
-Do not change any files in this directory.
-See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/ceilometer/.bzr/branch-format b/charms/trusty/ceilometer/.bzr/branch-format
deleted file mode 100644
index 9eb09b7..0000000
--- a/charms/trusty/ceilometer/.bzr/branch-format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/ceilometer/.bzr/branch/format b/charms/trusty/ceilometer/.bzr/branch/format
deleted file mode 100644
index b391ffd..0000000
--- a/charms/trusty/ceilometer/.bzr/branch/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG Branch Reference Format 1
diff --git a/charms/trusty/ceilometer/.bzr/branch/location b/charms/trusty/ceilometer/.bzr/branch/location
deleted file mode 100644
index a5d57cb..0000000
--- a/charms/trusty/ceilometer/.bzr/branch/location
+++ /dev/null
@@ -1 +0,0 @@
-bzr+ssh://bazaar.launchpad.net/~sdn-charmers/charms/trusty/ceilometer/ceilometer-plugin/ \ No newline at end of file
diff --git a/charms/trusty/ceilometer/.bzr/checkout/conflicts b/charms/trusty/ceilometer/.bzr/checkout/conflicts
deleted file mode 100644
index 0dc2d3a..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/conflicts
+++ /dev/null
@@ -1 +0,0 @@
-BZR conflict list format 1
diff --git a/charms/trusty/ceilometer/.bzr/checkout/dirstate b/charms/trusty/ceilometer/.bzr/checkout/dirstate
deleted file mode 100644
index 852f177..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/dirstate
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/ceilometer/.bzr/checkout/format b/charms/trusty/ceilometer/.bzr/checkout/format
deleted file mode 100644
index e0261c7..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/ceilometer/.bzr/checkout/views b/charms/trusty/ceilometer/.bzr/checkout/views
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/.bzr/checkout/views
+++ /dev/null
diff --git a/charms/trusty/ceilometer/.coveragerc b/charms/trusty/ceilometer/.coveragerc
deleted file mode 100644
index ed08ec9..0000000
--- a/charms/trusty/ceilometer/.coveragerc
+++ /dev/null
@@ -1,6 +0,0 @@
-[report]
-# Regexes for lines to exclude from consideration
-exclude_lines =
- if __name__ == .__main__.:
-include=
- hooks/ceilometer_*
diff --git a/charms/trusty/ceilometer/.gitignore b/charms/trusty/ceilometer/.gitignore
deleted file mode 100644
index 25d8aec..0000000
--- a/charms/trusty/ceilometer/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-bin
-.coverage
-.testrepository
-.tox
-tags
-*.sw[nop]
-*.pyc
diff --git a/charms/trusty/ceilometer/.gitreview b/charms/trusty/ceilometer/.gitreview
deleted file mode 100644
index 7ff19b3..0000000
--- a/charms/trusty/ceilometer/.gitreview
+++ /dev/null
@@ -1,5 +0,0 @@
-[gerrit]
-host=review.openstack.org
-port=29418
-project=openstack/charm-ceilometer.git
-defaultbranch=stable/16.04
diff --git a/charms/trusty/ceilometer/.project b/charms/trusty/ceilometer/.project
deleted file mode 100644
index 9e30b38..0000000
--- a/charms/trusty/ceilometer/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>ceilometer</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
- <buildCommand>
- <name>org.python.pydev.PyDevBuilder</name>
- <arguments>
- </arguments>
- </buildCommand>
- </buildSpec>
- <natures>
- <nature>org.python.pydev.pythonNature</nature>
- </natures>
-</projectDescription>
diff --git a/charms/trusty/ceilometer/.pydevproject b/charms/trusty/ceilometer/.pydevproject
deleted file mode 100644
index a338b81..0000000
--- a/charms/trusty/ceilometer/.pydevproject
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/ceilometer/hooks</path>
-<path>/ceilometer/unit_tests</path>
-</pydev_pathproperty>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-</pydev_project>
diff --git a/charms/trusty/ceilometer/.testr.conf b/charms/trusty/ceilometer/.testr.conf
deleted file mode 100644
index 801646b..0000000
--- a/charms/trusty/ceilometer/.testr.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
- OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
- OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
- ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
-
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/charms/trusty/ceilometer/Makefile b/charms/trusty/ceilometer/Makefile
deleted file mode 100644
index 64d2c34..0000000
--- a/charms/trusty/ceilometer/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-clean:
- rm -rf .coverage .tox .testrepository trusty .unit-state.db
- find . -iname '*.pyc' -delete
-
-lint:
- @tox -e pep8
-
-test:
- @# Bundletester expects unit tests here.
- tox -e py27
-
-functional_test:
- @echo Starting Amulet tests...
- @tests/setup/00-setup
- @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
-
-publish: lint test
- bzr push lp:charms/ceilometer
- bzr push lp:charms/trusty/ceilometer
diff --git a/charms/trusty/ceilometer/README.md b/charms/trusty/ceilometer/README.md
deleted file mode 100644
index 6c69440..0000000
--- a/charms/trusty/ceilometer/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-Overview
---------
-
-This charm provides the Ceilometer service for OpenStack. It is intended to
-be used alongside the other OpenStack components, starting with the Folsom
-release.
-
-Ceilometer is made up of 2 separate services: an API service, and a collector
-service. This charm allows them to be deployed in different combination,
-depending on user preference and requirements.
-
-This charm was developed to support deploying Folsom on both Ubuntu Quantal
-and Ubuntu Precise. Since Ceilometer is only available for Ubuntu 12.04 via
-the Ubuntu Cloud Archive, deploying this charm to a Precise machine will by
-default install Ceilometer and its dependencies from the Cloud Archive.
-
-Usage
------
-
-In order to deploy Ceilometer service, the MongoDB service is required:
-
- juju deploy mongodb
- juju deploy ceilometer
- juju add-relation ceilometer mongodb
-
-then Keystone and Rabbit relationships need to be established:
-
- juju add-relation ceilometer rabbitmq
- juju add-relation ceilometer keystone:identity-service
- juju add-relation ceilometer keystone:identity-notifications
-
-In order to capture the calculations, a Ceilometer compute agent needs to be
-installed in each nova node, and be related with Ceilometer service:
-
- juju deploy ceilometer-agent
- juju add-relation ceilometer-agent nova-compute
- juju add-relation ceilometer:ceilometer-service ceilometer-agent:ceilometer-service
-
-Ceilometer provides an API service that can be used to retrieve
-Openstack metrics.
-
-Network Space support
----------------------
-
-This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above.
-
-API endpoints can be bound to distinct network spaces supporting the network separation of public, internal and admin endpoints.
-
-To use this feature, use the --bind option when deploying the charm:
-
- juju deploy ceilometer --bind "public=public-space internal=internal-space admin=admin-space"
-
-alternatively these can also be provided as part of a juju native bundle configuration:
-
- ceilometer:
- charm: cs:xenial/ceilometer
- bindings:
- public: public-space
- admin: admin-space
- internal: internal-space
-
-NOTE: Spaces must be configured in the underlying provider prior to attempting to use them.
-
-NOTE: Existing deployments using os-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
diff --git a/charms/trusty/ceilometer/actions.yaml b/charms/trusty/ceilometer/actions.yaml
deleted file mode 100644
index dea9d08..0000000
--- a/charms/trusty/ceilometer/actions.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-pause:
- description: Pause the Ceilometer unit. This action will stop Ceilometer services.
-resume:
- descrpition: Resume the Ceilometer unit. This action will start Ceilometer services.
-openstack-upgrade:
- description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
diff --git a/charms/trusty/ceilometer/actions/actions.py b/charms/trusty/ceilometer/actions/actions.py
deleted file mode 100755
index 0c077b6..0000000
--- a/charms/trusty/ceilometer/actions/actions.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/python
-
-import os
-import sys
-
-from charmhelpers.core.hookenv import action_fail
-from ceilometer_utils import (
- pause_unit_helper,
- resume_unit_helper,
- register_configs,
-)
-
-
-def pause(args):
- """Pause the Ceilometer services.
-
- @raises Exception should the service fail to stop.
- """
- pause_unit_helper(register_configs())
-
-
-def resume(args):
- """Resume the Ceilometer services.
-
- @raises Exception should the service fail to start."""
- resume_unit_helper(register_configs())
-
-
-# A dictionary of all the defined actions to callables (which take
-# parsed arguments).
-ACTIONS = {"pause": pause, "resume": resume}
-
-
-def main(args):
- action_name = os.path.basename(args[0])
- try:
- action = ACTIONS[action_name]
- except KeyError:
- return "Action %s undefined" % action_name
- else:
- try:
- action(args)
- except Exception as e:
- action_fail(str(e))
-
-
-if __name__ == "__main__":
- sys.exit(main(sys.argv))
diff --git a/charms/trusty/ceilometer/actions/ceilometer_contexts.py b/charms/trusty/ceilometer/actions/ceilometer_contexts.py
deleted file mode 120000
index 6c03421..0000000
--- a/charms/trusty/ceilometer/actions/ceilometer_contexts.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_contexts.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/ceilometer_hooks.py b/charms/trusty/ceilometer/actions/ceilometer_hooks.py
deleted file mode 120000
index b55df6c..0000000
--- a/charms/trusty/ceilometer/actions/ceilometer_hooks.py
+++ /dev/null
@@ -1 +0,0 @@
-../hooks/ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/ceilometer_utils.py b/charms/trusty/ceilometer/actions/ceilometer_utils.py
deleted file mode 120000
index e333253..0000000
--- a/charms/trusty/ceilometer/actions/ceilometer_utils.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_utils.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/charmhelpers b/charms/trusty/ceilometer/actions/charmhelpers
deleted file mode 120000
index 702de73..0000000
--- a/charms/trusty/ceilometer/actions/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/openstack-upgrade b/charms/trusty/ceilometer/actions/openstack-upgrade
deleted file mode 120000
index 6179301..0000000
--- a/charms/trusty/ceilometer/actions/openstack-upgrade
+++ /dev/null
@@ -1 +0,0 @@
-openstack_upgrade.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/openstack_upgrade.py b/charms/trusty/ceilometer/actions/openstack_upgrade.py
deleted file mode 100755
index cb04493..0000000
--- a/charms/trusty/ceilometer/actions/openstack_upgrade.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/python
-import sys
-
-sys.path.append('hooks/')
-
-from charmhelpers.contrib.openstack.utils import (
- do_action_openstack_upgrade,
-)
-
-from ceilometer_hooks import (
- config_changed,
- CONFIGS,
-)
-
-from ceilometer_utils import (
- do_openstack_upgrade,
-)
-
-
-def openstack_upgrade():
- """Perform action-managed OpenStack upgrade.
-
- Upgrades packages to the configured openstack-origin version and sets
- the corresponding action status as a result.
-
- If the charm was installed from source we cannot upgrade it.
- For backwards compatibility a config flag (action-managed-upgrade) must
- be set for this code to run, otherwise a full service level upgrade will
- fire on config-changed."""
-
- if (do_action_openstack_upgrade('ceilometer-common',
- do_openstack_upgrade,
- CONFIGS)):
- config_changed()
-
-if __name__ == '__main__':
- openstack_upgrade()
diff --git a/charms/trusty/ceilometer/actions/pause b/charms/trusty/ceilometer/actions/pause
deleted file mode 120000
index 405a394..0000000
--- a/charms/trusty/ceilometer/actions/pause
+++ /dev/null
@@ -1 +0,0 @@
-actions.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/actions/resume b/charms/trusty/ceilometer/actions/resume
deleted file mode 120000
index 405a394..0000000
--- a/charms/trusty/ceilometer/actions/resume
+++ /dev/null
@@ -1 +0,0 @@
-actions.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charm-helpers-hooks.yaml b/charms/trusty/ceilometer/charm-helpers-hooks.yaml
deleted file mode 100644
index b17947b..0000000
--- a/charms/trusty/ceilometer/charm-helpers-hooks.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-branch: lp:~openstack-charmers/charm-helpers/stable
-destination: hooks/charmhelpers
-include:
- - core
- - cli
- - fetch
- - contrib.openstack|inc=*
- - contrib.hahelpers
- - contrib.storage.linux
- - contrib.network.ip
- - contrib.python.packages
- - contrib.charmsupport
- - contrib.peerstorage
- - payload.execd
- - contrib.hardening|inc=* \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charm-helpers-tests.yaml b/charms/trusty/ceilometer/charm-helpers-tests.yaml
deleted file mode 100644
index c469f13..0000000
--- a/charms/trusty/ceilometer/charm-helpers-tests.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:~openstack-charmers/charm-helpers/stable
-destination: tests/charmhelpers
-include:
- - contrib.amulet
- - contrib.openstack.amulet
diff --git a/charms/trusty/ceilometer/charmhelpers/__init__.py b/charms/trusty/ceilometer/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/ceilometer/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/__init__.py b/charms/trusty/ceilometer/charmhelpers/cli/__init__.py
deleted file mode 100644
index 2d37ab3..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/__init__.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import inspect
-import argparse
-import sys
-
-from six.moves import zip
-
-import charmhelpers.core.unitdata
-
-
-class OutputFormatter(object):
- def __init__(self, outfile=sys.stdout):
- self.formats = (
- "raw",
- "json",
- "py",
- "yaml",
- "csv",
- "tab",
- )
- self.outfile = outfile
-
- def add_arguments(self, argument_parser):
- formatgroup = argument_parser.add_mutually_exclusive_group()
- choices = self.supported_formats
- formatgroup.add_argument("--format", metavar='FMT',
- help="Select output format for returned data, "
- "where FMT is one of: {}".format(choices),
- choices=choices, default='raw')
- for fmt in self.formats:
- fmtfunc = getattr(self, fmt)
- formatgroup.add_argument("-{}".format(fmt[0]),
- "--{}".format(fmt), action='store_const',
- const=fmt, dest='format',
- help=fmtfunc.__doc__)
-
- @property
- def supported_formats(self):
- return self.formats
-
- def raw(self, output):
- """Output data as raw string (default)"""
- if isinstance(output, (list, tuple)):
- output = '\n'.join(map(str, output))
- self.outfile.write(str(output))
-
- def py(self, output):
- """Output data as a nicely-formatted python data structure"""
- import pprint
- pprint.pprint(output, stream=self.outfile)
-
- def json(self, output):
- """Output data in JSON format"""
- import json
- json.dump(output, self.outfile)
-
- def yaml(self, output):
- """Output data in YAML format"""
- import yaml
- yaml.safe_dump(output, self.outfile)
-
- def csv(self, output):
- """Output data as excel-compatible CSV"""
- import csv
- csvwriter = csv.writer(self.outfile)
- csvwriter.writerows(output)
-
- def tab(self, output):
- """Output data in excel-compatible tab-delimited format"""
- import csv
- csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
- csvwriter.writerows(output)
-
- def format_output(self, output, fmt='raw'):
- fmtfunc = getattr(self, fmt)
- fmtfunc(output)
-
-
-class CommandLine(object):
- argument_parser = None
- subparsers = None
- formatter = None
- exit_code = 0
-
- def __init__(self):
- if not self.argument_parser:
- self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
- if not self.formatter:
- self.formatter = OutputFormatter()
- self.formatter.add_arguments(self.argument_parser)
- if not self.subparsers:
- self.subparsers = self.argument_parser.add_subparsers(help='Commands')
-
- def subcommand(self, command_name=None):
- """
- Decorate a function as a subcommand. Use its arguments as the
- command-line arguments"""
- def wrapper(decorated):
- cmd_name = command_name or decorated.__name__
- subparser = self.subparsers.add_parser(cmd_name,
- description=decorated.__doc__)
- for args, kwargs in describe_arguments(decorated):
- subparser.add_argument(*args, **kwargs)
- subparser.set_defaults(func=decorated)
- return decorated
- return wrapper
-
- def test_command(self, decorated):
- """
- Subcommand is a boolean test function, so bool return values should be
- converted to a 0/1 exit code.
- """
- decorated._cli_test_command = True
- return decorated
-
- def no_output(self, decorated):
- """
- Subcommand is not expected to return a value, so don't print a spurious None.
- """
- decorated._cli_no_output = True
- return decorated
-
- def subcommand_builder(self, command_name, description=None):
- """
- Decorate a function that builds a subcommand. Builders should accept a
- single argument (the subparser instance) and return the function to be
- run as the command."""
- def wrapper(decorated):
- subparser = self.subparsers.add_parser(command_name)
- func = decorated(subparser)
- subparser.set_defaults(func=func)
- subparser.description = description or func.__doc__
- return wrapper
-
- def run(self):
- "Run cli, processing arguments and executing subcommands."
- arguments = self.argument_parser.parse_args()
- argspec = inspect.getargspec(arguments.func)
- vargs = []
- for arg in argspec.args:
- vargs.append(getattr(arguments, arg))
- if argspec.varargs:
- vargs.extend(getattr(arguments, argspec.varargs))
- output = arguments.func(*vargs)
- if getattr(arguments.func, '_cli_test_command', False):
- self.exit_code = 0 if output else 1
- output = ''
- if getattr(arguments.func, '_cli_no_output', False):
- output = ''
- self.formatter.format_output(output, arguments.format)
- if charmhelpers.core.unitdata._KV:
- charmhelpers.core.unitdata._KV.flush()
-
-
-cmdline = CommandLine()
-
-
-def describe_arguments(func):
- """
- Analyze a function's signature and return a data structure suitable for
- passing in as arguments to an argparse parser's add_argument() method."""
-
- argspec = inspect.getargspec(func)
- # we should probably raise an exception somewhere if func includes **kwargs
- if argspec.defaults:
- positional_args = argspec.args[:-len(argspec.defaults)]
- keyword_names = argspec.args[-len(argspec.defaults):]
- for arg, default in zip(keyword_names, argspec.defaults):
- yield ('--{}'.format(arg),), {'default': default}
- else:
- positional_args = argspec.args
-
- for arg in positional_args:
- yield (arg,), {}
- if argspec.varargs:
- yield (argspec.varargs,), {'nargs': '*'}
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/benchmark.py b/charms/trusty/ceilometer/charmhelpers/cli/benchmark.py
deleted file mode 100644
index b23c16c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/benchmark.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.contrib.benchmark import Benchmark
-
-
-@cmdline.subcommand(command_name='benchmark-start')
-def start():
- Benchmark.start()
-
-
-@cmdline.subcommand(command_name='benchmark-finish')
-def finish():
- Benchmark.finish()
-
-
-@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
-def service(subparser):
- subparser.add_argument("value", help="The composite score.")
- subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
- subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
- return Benchmark.set_composite_score
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/commands.py b/charms/trusty/ceilometer/charmhelpers/cli/commands.py
deleted file mode 100644
index 7e91db0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/commands.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module loads sub-modules into the python runtime so they can be
-discovered via the inspect module. In order to prevent flake8 from (rightfully)
-telling us these are unused modules, throw a ' # noqa' at the end of each import
-so that the warning is suppressed.
-"""
-
-from . import CommandLine # noqa
-
-"""
-Import the sub-modules which have decorated subcommands to register with chlp.
-"""
-from . import host # noqa
-from . import benchmark # noqa
-from . import unitdata # noqa
-from . import hookenv # noqa
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/hookenv.py b/charms/trusty/ceilometer/charmhelpers/cli/hookenv.py
deleted file mode 100644
index 265c816..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/hookenv.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.core import hookenv
-
-
-cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
-cmdline.subcommand('service-name')(hookenv.service_name)
-cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/host.py b/charms/trusty/ceilometer/charmhelpers/cli/host.py
deleted file mode 100644
index 58e78d6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/host.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.core import host
-
-
-@cmdline.subcommand()
-def mounts():
- "List mounts"
- return host.mounts()
-
-
-@cmdline.subcommand_builder('service', description="Control system services")
-def service(subparser):
- subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
- subparser.add_argument("service_name", help="Name of the service to control")
- return host.service
diff --git a/charms/trusty/ceilometer/charmhelpers/cli/unitdata.py b/charms/trusty/ceilometer/charmhelpers/cli/unitdata.py
deleted file mode 100644
index d1cd95b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/cli/unitdata.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from . import cmdline
-from charmhelpers.core import unitdata
-
-
-@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
-def unitdata_cmd(subparser):
- nested = subparser.add_subparsers()
- get_cmd = nested.add_parser('get', help='Retrieve data')
- get_cmd.add_argument('key', help='Key to retrieve the value of')
- get_cmd.set_defaults(action='get', value=None)
- set_cmd = nested.add_parser('set', help='Store data')
- set_cmd.add_argument('key', help='Key to set')
- set_cmd.add_argument('value', help='Value to store')
- set_cmd.set_defaults(action='set')
-
- def _unitdata_cmd(action, key, value):
- if action == 'get':
- return unitdata.kv().get(key)
- elif action == 'set':
- unitdata.kv().set(key, value)
- unitdata.kv().flush()
- return ''
- return _unitdata_cmd
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py
deleted file mode 100644
index 2f24642..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/nrpe.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Compatibility with the nrpe-external-master charm"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import subprocess
-import pwd
-import grp
-import os
-import glob
-import shutil
-import re
-import shlex
-import yaml
-
-from charmhelpers.core.hookenv import (
- config,
- local_unit,
- log,
- relation_ids,
- relation_set,
- relations_of_type,
-)
-
-from charmhelpers.core.host import service
-
-# This module adds compatibility with the nrpe-external-master and plain nrpe
-# subordinate charms. To use it in your charm:
-#
-# 1. Update metadata.yaml
-#
-# provides:
-# (...)
-# nrpe-external-master:
-# interface: nrpe-external-master
-# scope: container
-#
-# and/or
-#
-# provides:
-# (...)
-# local-monitors:
-# interface: local-monitors
-# scope: container
-
-#
-# 2. Add the following to config.yaml
-#
-# nagios_context:
-# default: "juju"
-# type: string
-# description: |
-# Used by the nrpe subordinate charms.
-# A string that will be prepended to instance name to set the host name
-# in nagios. So for instance the hostname would be something like:
-# juju-myservice-0
-# If you're running multiple environments with the same services in them
-# this allows you to differentiate between them.
-# nagios_servicegroups:
-# default: ""
-# type: string
-# description: |
-# A comma-separated list of nagios servicegroups.
-# If left empty, the nagios_context will be used as the servicegroup
-#
-# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
-#
-# 4. Update your hooks.py with something like this:
-#
-# from charmsupport.nrpe import NRPE
-# (...)
-# def update_nrpe_config():
-# nrpe_compat = NRPE()
-# nrpe_compat.add_check(
-# shortname = "myservice",
-# description = "Check MyService",
-# check_cmd = "check_http -w 2 -c 10 http://localhost"
-# )
-# nrpe_compat.add_check(
-# "myservice_other",
-# "Check for widget failures",
-# check_cmd = "/srv/myapp/scripts/widget_check"
-# )
-# nrpe_compat.write()
-#
-# def config_changed():
-# (...)
-# update_nrpe_config()
-#
-# def nrpe_external_master_relation_changed():
-# update_nrpe_config()
-#
-# def local_monitors_relation_changed():
-# update_nrpe_config()
-#
-# 5. ln -s hooks.py nrpe-external-master-relation-changed
-# ln -s hooks.py local-monitors-relation-changed
-
-
-class CheckException(Exception):
- pass
-
-
-class Check(object):
- shortname_re = '[A-Za-z0-9-_]+$'
- service_template = ("""
-#---------------------------------------------------
-# This file is Juju managed
-#---------------------------------------------------
-define service {{
- use active-service
- host_name {nagios_hostname}
- service_description {nagios_hostname}[{shortname}] """
- """{description}
- check_command check_nrpe!{command}
- servicegroups {nagios_servicegroup}
-}}
-""")
-
- def __init__(self, shortname, description, check_cmd):
- super(Check, self).__init__()
- # XXX: could be better to calculate this from the service name
- if not re.match(self.shortname_re, shortname):
- raise CheckException("shortname must match {}".format(
- Check.shortname_re))
- self.shortname = shortname
- self.command = "check_{}".format(shortname)
- # Note: a set of invalid characters is defined by the
- # Nagios server config
- # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
- self.description = description
- self.check_cmd = self._locate_cmd(check_cmd)
-
- def _get_check_filename(self):
- return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
-
- def _get_service_filename(self, hostname):
- return os.path.join(NRPE.nagios_exportdir,
- 'service__{}_{}.cfg'.format(hostname, self.command))
-
- def _locate_cmd(self, check_cmd):
- search_path = (
- '/usr/lib/nagios/plugins',
- '/usr/local/lib/nagios/plugins',
- )
- parts = shlex.split(check_cmd)
- for path in search_path:
- if os.path.exists(os.path.join(path, parts[0])):
- command = os.path.join(path, parts[0])
- if len(parts) > 1:
- command += " " + " ".join(parts[1:])
- return command
- log('Check command not found: {}'.format(parts[0]))
- return ''
-
- def _remove_service_files(self):
- if not os.path.exists(NRPE.nagios_exportdir):
- return
- for f in os.listdir(NRPE.nagios_exportdir):
- if f.endswith('_{}.cfg'.format(self.command)):
- os.remove(os.path.join(NRPE.nagios_exportdir, f))
-
- def remove(self, hostname):
- nrpe_check_file = self._get_check_filename()
- if os.path.exists(nrpe_check_file):
- os.remove(nrpe_check_file)
- self._remove_service_files()
-
- def write(self, nagios_context, hostname, nagios_servicegroups):
- nrpe_check_file = self._get_check_filename()
- with open(nrpe_check_file, 'w') as nrpe_check_config:
- nrpe_check_config.write("# check {}\n".format(self.shortname))
- nrpe_check_config.write("command[{}]={}\n".format(
- self.command, self.check_cmd))
-
- if not os.path.exists(NRPE.nagios_exportdir):
- log('Not writing service config as {} is not accessible'.format(
- NRPE.nagios_exportdir))
- else:
- self.write_service_config(nagios_context, hostname,
- nagios_servicegroups)
-
- def write_service_config(self, nagios_context, hostname,
- nagios_servicegroups):
- self._remove_service_files()
-
- templ_vars = {
- 'nagios_hostname': hostname,
- 'nagios_servicegroup': nagios_servicegroups,
- 'description': self.description,
- 'shortname': self.shortname,
- 'command': self.command,
- }
- nrpe_service_text = Check.service_template.format(**templ_vars)
- nrpe_service_file = self._get_service_filename(hostname)
- with open(nrpe_service_file, 'w') as nrpe_service_config:
- nrpe_service_config.write(str(nrpe_service_text))
-
- def run(self):
- subprocess.call(self.check_cmd)
-
-
-class NRPE(object):
- nagios_logdir = '/var/log/nagios'
- nagios_exportdir = '/var/lib/nagios/export'
- nrpe_confdir = '/etc/nagios/nrpe.d'
-
- def __init__(self, hostname=None):
- super(NRPE, self).__init__()
- self.config = config()
- self.nagios_context = self.config['nagios_context']
- if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
- self.nagios_servicegroups = self.config['nagios_servicegroups']
- else:
- self.nagios_servicegroups = self.nagios_context
- self.unit_name = local_unit().replace('/', '-')
- if hostname:
- self.hostname = hostname
- else:
- nagios_hostname = get_nagios_hostname()
- if nagios_hostname:
- self.hostname = nagios_hostname
- else:
- self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
- self.checks = []
-
- def add_check(self, *args, **kwargs):
- self.checks.append(Check(*args, **kwargs))
-
- def remove_check(self, *args, **kwargs):
- if kwargs.get('shortname') is None:
- raise ValueError('shortname of check must be specified')
-
- # Use sensible defaults if they're not specified - these are not
- # actually used during removal, but they're required for constructing
- # the Check object; check_disk is chosen because it's part of the
- # nagios-plugins-basic package.
- if kwargs.get('check_cmd') is None:
- kwargs['check_cmd'] = 'check_disk'
- if kwargs.get('description') is None:
- kwargs['description'] = ''
-
- check = Check(*args, **kwargs)
- check.remove(self.hostname)
-
- def write(self):
- try:
- nagios_uid = pwd.getpwnam('nagios').pw_uid
- nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
- log("Nagios user not set up, nrpe checks not updated")
- return
-
- if not os.path.exists(NRPE.nagios_logdir):
- os.mkdir(NRPE.nagios_logdir)
- os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
-
- nrpe_monitors = {}
- monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
- for nrpecheck in self.checks:
- nrpecheck.write(self.nagios_context, self.hostname,
- self.nagios_servicegroups)
- nrpe_monitors[nrpecheck.shortname] = {
- "command": nrpecheck.command,
- }
-
- service('restart', 'nagios-nrpe-server')
-
- monitor_ids = relation_ids("local-monitors") + \
- relation_ids("nrpe-external-master")
- for rid in monitor_ids:
- relation_set(relation_id=rid, monitors=yaml.dump(monitors))
-
-
-def get_nagios_hostcontext(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_host_context
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_host_context' in rel:
- return rel['nagios_host_context']
-
-
-def get_nagios_hostname(relation_name='nrpe-external-master'):
- """
- Query relation with nrpe subordinate, return the nagios_hostname
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- for rel in relations_of_type(relation_name):
- if 'nagios_hostname' in rel:
- return rel['nagios_hostname']
-
-
-def get_nagios_unit_name(relation_name='nrpe-external-master'):
- """
- Return the nagios unit name prepended with host_context if needed
-
- :param str relation_name: Name of relation nrpe sub joined to
- """
- host_context = get_nagios_hostcontext(relation_name)
- if host_context:
- unit = "%s:%s" % (host_context, local_unit())
- else:
- unit = local_unit()
- return unit
-
-
-def add_init_service_checks(nrpe, services, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param list services: List of services to check
- :param str unit_name: Unit name to use in check description
- """
- for svc in services:
- upstart_init = '/etc/init/%s.conf' % svc
- sysv_init = '/etc/init.d/%s' % svc
- if os.path.exists(upstart_init):
- # Don't add a check for these services from neutron-gateway
- if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_upstart_job %s' % svc
- )
- elif os.path.exists(sysv_init):
- cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
- cron_file = ('*/5 * * * * root '
- '/usr/local/lib/nagios/plugins/check_exit_status.pl '
- '-s /etc/init.d/%s status > '
- '/var/lib/nagios/service-check-%s.txt\n' % (svc,
- svc)
- )
- f = open(cronpath, 'w')
- f.write(cron_file)
- f.close()
- nrpe.add_check(
- shortname=svc,
- description='process check {%s}' % unit_name,
- check_cmd='check_status_file.py -f '
- '/var/lib/nagios/service-check-%s.txt' % svc,
- )
-
-
-def copy_nrpe_checks():
- """
- Copy the nrpe checks into place
-
- """
- NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
- nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
- 'charmhelpers', 'contrib', 'openstack',
- 'files')
-
- if not os.path.exists(NAGIOS_PLUGINS):
- os.makedirs(NAGIOS_PLUGINS)
- for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
- if os.path.isfile(fname):
- shutil.copy2(fname,
- os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
-
-
-def add_haproxy_checks(nrpe, unit_name):
- """
- Add checks for each service in list
-
- :param NRPE nrpe: NRPE object to add check to
- :param str unit_name: Unit name to use in check description
- """
- nrpe.add_check(
- shortname='haproxy_servers',
- description='Check HAProxy {%s}' % unit_name,
- check_cmd='check_haproxy.sh')
- nrpe.add_check(
- shortname='haproxy_queue',
- description='Check HAProxy queue depth {%s}' % unit_name,
- check_cmd='check_haproxy_queue_depth.sh')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py
deleted file mode 100644
index 320961b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/charmsupport/volumes.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Functions for managing volumes in juju units. One volume is supported per unit.
-Subordinates may have their own storage, provided it is on its own partition.
-
-Configuration stanzas::
-
- volume-ephemeral:
- type: boolean
- default: true
- description: >
- If false, a volume is mounted as sepecified in "volume-map"
- If true, ephemeral storage will be used, meaning that log data
- will only exist as long as the machine. YOU HAVE BEEN WARNED.
- volume-map:
- type: string
- default: {}
- description: >
- YAML map of units to device names, e.g:
- "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
- Service units will raise a configure-error if volume-ephemeral
- is 'true' and no volume-map value is set. Use 'juju set' to set a
- value and 'juju resolved' to complete configuration.
-
-Usage::
-
- from charmsupport.volumes import configure_volume, VolumeConfigurationError
- from charmsupport.hookenv import log, ERROR
- def post_mount_hook():
- stop_service('myservice')
- def post_mount_hook():
- start_service('myservice')
-
- if __name__ == '__main__':
- try:
- configure_volume(before_change=pre_mount_hook,
- after_change=post_mount_hook)
- except VolumeConfigurationError:
- log('Storage could not be configured', ERROR)
-
-'''
-
-# XXX: Known limitations
-# - fstab is neither consulted nor updated
-
-import os
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-import yaml
-
-
-MOUNT_BASE = '/srv/juju/volumes'
-
-
-class VolumeConfigurationError(Exception):
- '''Volume configuration data is missing or invalid'''
- pass
-
-
-def get_config():
- '''Gather and sanity-check volume configuration data'''
- volume_config = {}
- config = hookenv.config()
-
- errors = False
-
- if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
- volume_config['ephemeral'] = True
- else:
- volume_config['ephemeral'] = False
-
- try:
- volume_map = yaml.safe_load(config.get('volume-map', '{}'))
- except yaml.YAMLError as e:
- hookenv.log("Error parsing YAML volume-map: {}".format(e),
- hookenv.ERROR)
- errors = True
- if volume_map is None:
- # probably an empty string
- volume_map = {}
- elif not isinstance(volume_map, dict):
- hookenv.log("Volume-map should be a dictionary, not {}".format(
- type(volume_map)))
- errors = True
-
- volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
- if volume_config['device'] and volume_config['ephemeral']:
- # asked for ephemeral storage but also defined a volume ID
- hookenv.log('A volume is defined for this unit, but ephemeral '
- 'storage was requested', hookenv.ERROR)
- errors = True
- elif not volume_config['device'] and not volume_config['ephemeral']:
- # asked for permanent storage but did not define volume ID
- hookenv.log('Ephemeral storage was requested, but there is no volume '
- 'defined for this unit.', hookenv.ERROR)
- errors = True
-
- unit_mount_name = hookenv.local_unit().replace('/', '-')
- volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
-
- if errors:
- return None
- return volume_config
-
-
-def mount_volume(config):
- if os.path.exists(config['mountpoint']):
- if not os.path.isdir(config['mountpoint']):
- hookenv.log('Not a directory: {}'.format(config['mountpoint']))
- raise VolumeConfigurationError()
- else:
- host.mkdir(config['mountpoint'])
- if os.path.ismount(config['mountpoint']):
- unmount_volume(config)
- if not host.mount(config['device'], config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def unmount_volume(config):
- if os.path.ismount(config['mountpoint']):
- if not host.umount(config['mountpoint'], persist=True):
- raise VolumeConfigurationError()
-
-
-def managed_mounts():
- '''List of all mounted managed volumes'''
- return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
-
-
-def configure_volume(before_change=lambda: None, after_change=lambda: None):
- '''Set up storage (or don't) according to the charm's volume configuration.
- Returns the mount point or "ephemeral". before_change and after_change
- are optional functions to be called if the volume configuration changes.
- '''
-
- config = get_config()
- if not config:
- hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
- raise VolumeConfigurationError()
-
- if config['ephemeral']:
- if os.path.ismount(config['mountpoint']):
- before_change()
- unmount_volume(config)
- after_change()
- return 'ephemeral'
- else:
- # persistent storage
- if os.path.ismount(config['mountpoint']):
- mounts = dict(managed_mounts())
- if mounts.get(config['mountpoint']) != config['device']:
- before_change()
- unmount_volume(config)
- mount_volume(config)
- after_change()
- else:
- before_change()
- mount_volume(config)
- after_change()
- return config['mountpoint']
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py
deleted file mode 100644
index 0091719..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/apache.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import subprocess
-
-from charmhelpers.core.hookenv import (
- config as config_get,
- relation_get,
- relation_ids,
- related_units as relation_list,
- log,
- INFO,
-)
-
-
-def get_cert(cn=None):
- # TODO: deal with multiple https endpoints via charm config
- cert = config_get('ssl_cert')
- key = config_get('ssl_key')
- if not (cert and key):
- log("Inspecting identity-service relations for SSL certificate.",
- level=INFO)
- cert = key = None
- if cn:
- ssl_cert_attr = 'ssl_cert_{}'.format(cn)
- ssl_key_attr = 'ssl_key_{}'.format(cn)
- else:
- ssl_cert_attr = 'ssl_cert'
- ssl_key_attr = 'ssl_key'
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if not cert:
- cert = relation_get(ssl_cert_attr,
- rid=r_id, unit=unit)
- if not key:
- key = relation_get(ssl_key_attr,
- rid=r_id, unit=unit)
- return (cert, key)
-
-
-def get_ca_cert():
- ca_cert = config_get('ssl_ca')
- if ca_cert is None:
- log("Inspecting identity-service relations for CA SSL certificate.",
- level=INFO)
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- if ca_cert is None:
- ca_cert = relation_get('ca_cert',
- rid=r_id, unit=unit)
- return ca_cert
-
-
-def install_ca_cert(ca_cert):
- if ca_cert:
- with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
- 'w') as crt:
- crt.write(ca_cert)
- subprocess.check_call(['update-ca-certificates', '--fresh'])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py b/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py
deleted file mode 100644
index aa0b515..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hahelpers/cluster.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-"""
-Helpers for clustering and determining "cluster leadership" and other
-clustering-related helpers.
-"""
-
-import subprocess
-import os
-
-from socket import gethostname as get_unit_hostname
-
-import six
-
-from charmhelpers.core.hookenv import (
- log,
- relation_ids,
- related_units as relation_list,
- relation_get,
- config as config_get,
- INFO,
- ERROR,
- WARNING,
- unit_get,
- is_leader as juju_is_leader
-)
-from charmhelpers.core.decorators import (
- retry_on_exception,
-)
-from charmhelpers.core.strutils import (
- bool_from_string,
-)
-
-DC_RESOURCE_NAME = 'DC'
-
-
-class HAIncompleteConfig(Exception):
- pass
-
-
-class CRMResourceNotFound(Exception):
- pass
-
-
-class CRMDCNotFound(Exception):
- pass
-
-
-def is_elected_leader(resource):
- """
- Returns True if the charm executing this is the elected cluster leader.
-
- It relies on two mechanisms to determine leadership:
- 1. If juju is sufficiently new and leadership election is supported,
- the is_leader command will be used.
- 2. If the charm is part of a corosync cluster, call corosync to
- determine leadership.
- 3. If the charm is not part of a corosync cluster, the leader is
- determined as being "the alive unit with the lowest unit numer". In
- other words, the oldest surviving unit.
- """
- try:
- return juju_is_leader()
- except NotImplementedError:
- log('Juju leadership election feature not enabled'
- ', using fallback support',
- level=WARNING)
-
- if is_clustered():
- if not is_crm_leader(resource):
- log('Deferring action to CRM leader.', level=INFO)
- return False
- else:
- peers = peer_units()
- if peers and not oldest_peer(peers):
- log('Deferring action to oldest service unit.', level=INFO)
- return False
- return True
-
-
-def is_clustered():
- for r_id in (relation_ids('ha') or []):
- for unit in (relation_list(r_id) or []):
- clustered = relation_get('clustered',
- rid=r_id,
- unit=unit)
- if clustered:
- return True
- return False
-
-
-def is_crm_dc():
- """
- Determine leadership by querying the pacemaker Designated Controller
- """
- cmd = ['crm', 'status']
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError as ex:
- raise CRMDCNotFound(str(ex))
-
- current_dc = ''
- for line in status.split('\n'):
- if line.startswith('Current DC'):
- # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
- current_dc = line.split(':')[1].split()[0]
- if current_dc == get_unit_hostname():
- return True
- elif current_dc == 'NONE':
- raise CRMDCNotFound('Current DC: NONE')
-
- return False
-
-
-@retry_on_exception(5, base_delay=2,
- exc_type=(CRMResourceNotFound, CRMDCNotFound))
-def is_crm_leader(resource, retry=False):
- """
- Returns True if the charm calling this is the elected corosync leader,
- as returned by calling the external "crm" command.
-
- We allow this operation to be retried to avoid the possibility of getting a
- false negative. See LP #1396246 for more info.
- """
- if resource == DC_RESOURCE_NAME:
- return is_crm_dc()
- cmd = ['crm', 'resource', 'show', resource]
- try:
- status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
- if not isinstance(status, six.text_type):
- status = six.text_type(status, "utf-8")
- except subprocess.CalledProcessError:
- status = None
-
- if status and get_unit_hostname() in status:
- return True
-
- if status and "resource %s is NOT running" % (resource) in status:
- raise CRMResourceNotFound("CRM resource %s not found" % (resource))
-
- return False
-
-
-def is_leader(resource):
- log("is_leader is deprecated. Please consider using is_crm_leader "
- "instead.", level=WARNING)
- return is_crm_leader(resource)
-
-
-def peer_units(peer_relation="cluster"):
- peers = []
- for r_id in (relation_ids(peer_relation) or []):
- for unit in (relation_list(r_id) or []):
- peers.append(unit)
- return peers
-
-
-def peer_ips(peer_relation='cluster', addr_key='private-address'):
- '''Return a dict of peers and their private-address'''
- peers = {}
- for r_id in relation_ids(peer_relation):
- for unit in relation_list(r_id):
- peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
- return peers
-
-
-def oldest_peer(peers):
- """Determines who the oldest peer is by comparing unit numbers."""
- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
- for peer in peers:
- remote_unit_no = int(peer.split('/')[1])
- if remote_unit_no < local_unit_no:
- return False
- return True
-
-
-def eligible_leader(resource):
- log("eligible_leader is deprecated. Please consider using "
- "is_elected_leader instead.", level=WARNING)
- return is_elected_leader(resource)
-
-
-def https():
- '''
- Determines whether enough data has been provided in configuration
- or relation data to configure HTTPS
- .
- returns: boolean
- '''
- use_https = config_get('use-https')
- if use_https and bool_from_string(use_https):
- return True
- if config_get('ssl_cert') and config_get('ssl_key'):
- return True
- for r_id in relation_ids('identity-service'):
- for unit in relation_list(r_id):
- # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
- rel_state = [
- relation_get('https_keystone', rid=r_id, unit=unit),
- relation_get('ca_cert', rid=r_id, unit=unit),
- ]
- # NOTE: works around (LP: #1203241)
- if (None not in rel_state) and ('' not in rel_state):
- return True
- return False
-
-
-def determine_api_port(public_port, singlenode_mode=False):
- '''
- Determine correct API server listening port based on
- existence of HTTPS reverse proxy and/or haproxy.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the API service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- if https():
- i += 1
- return public_port - (i * 10)
-
-
-def determine_apache_port(public_port, singlenode_mode=False):
- '''
- Description: Determine correct apache listening port based on public IP +
- state of the cluster.
-
- public_port: int: standard public port for given service
-
- singlenode_mode: boolean: Shuffle ports when only a single unit is present
-
- returns: int: the correct listening port for the HAProxy service
- '''
- i = 0
- if singlenode_mode:
- i += 1
- elif len(peer_units()) > 0 or is_clustered():
- i += 1
- return public_port - (i * 10)
-
-
-def get_hacluster_config(exclude_keys=None):
- '''
- Obtains all relevant configuration from charm configuration required
- for initiating a relation to hacluster:
-
- ha-bindiface, ha-mcastport, vip
-
- param: exclude_keys: list of setting key(s) to be excluded.
- returns: dict: A dict containing settings keyed by setting name.
- raises: HAIncompleteConfig if settings are missing.
- '''
- settings = ['ha-bindiface', 'ha-mcastport', 'vip']
- conf = {}
- for setting in settings:
- if exclude_keys and setting in exclude_keys:
- continue
-
- conf[setting] = config_get(setting)
- missing = []
- [missing.append(s) for s, v in six.iteritems(conf) if v is None]
- if missing:
- log('Insufficient config data to configure hacluster.', level=ERROR)
- raise HAIncompleteConfig
- return conf
-
-
-def canonical_url(configs, vip_setting='vip'):
- '''
- Returns the correct HTTP URL to this host given the state of HTTPS
- configuration and hacluster.
-
- :configs : OSTemplateRenderer: A config tempating object to inspect for
- a complete https context.
-
- :vip_setting: str: Setting in charm config that specifies
- VIP address.
- '''
- scheme = 'http'
- if 'https' in configs.complete_contexts():
- scheme = 'https'
- if is_clustered():
- addr = config_get(vip_setting)
- else:
- addr = unit_get('private-address')
- return '%s://%s' % (scheme, addr)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md
deleted file mode 100644
index 91280c0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/README.hardening.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Juju charm-helpers hardening library
-
-## Description
-
-This library provides multiple implementations of system and application
-hardening that conform to the standards of http://hardening.io/.
-
-Current implementations include:
-
- * OS
- * SSH
- * MySQL
- * Apache
-
-## Requirements
-
-* Juju Charms
-
-## Usage
-
-1. Synchronise this library into your charm and add the harden() decorator
- (from contrib.hardening.harden) to any functions or methods you want to use
- to trigger hardening of your application/system.
-
-2. Add a config option called 'harden' to your charm config.yaml and set it to
- a space-delimited list of hardening modules you want to run e.g. "os ssh"
-
-3. Override any config defaults (contrib.hardening.defaults) by adding a file
- called hardening.yaml to your charm root containing the name(s) of the
- modules whose settings you want override at root level and then any settings
- with overrides e.g.
-
- os:
- general:
- desktop_enable: True
-
-4. Now just run your charm as usual and hardening will be applied each time the
- hook runs.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py
deleted file mode 100644
index a133532..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py
deleted file mode 100644
index d130479..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.apache.checks import config
-
-
-def run_apache_checks():
- log("Starting Apache hardening checks.", level=DEBUG)
- checks = config.get_audits()
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("Apache hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py
deleted file mode 100644
index 8249ca0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/checks/config.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import subprocess
-
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- DirectoryPermissionAudit,
- NoReadWriteForOther,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
-from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get Apache hardening config audits.
-
- :returns: dictionary of audits
- """
- if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
- log("Apache server does not appear to be installed on this node - "
- "skipping apache hardening", level=INFO)
- return []
-
- context = ApacheConfContext()
- settings = utils.get_settings('apache')
- audits = [
- FilePermissionAudit(paths='/etc/apache2/apache2.conf', user='root',
- group='root', mode=0o0640),
-
- TemplatedFile(os.path.join(settings['common']['apache_dir'],
- 'mods-available/alias.conf'),
- context,
- TEMPLATES_DIR,
- mode=0o0755,
- user='root',
- service_actions=[{'service': 'apache2',
- 'actions': ['restart']}]),
-
- TemplatedFile(os.path.join(settings['common']['apache_dir'],
- 'conf-enabled/hardening.conf'),
- context,
- TEMPLATES_DIR,
- mode=0o0640,
- user='root',
- service_actions=[{'service': 'apache2',
- 'actions': ['restart']}]),
-
- DirectoryPermissionAudit(settings['common']['apache_dir'],
- user='root',
- group='root',
- mode=0o640),
-
- DisabledModuleAudit(settings['hardening']['modules_to_disable']),
-
- NoReadWriteForOther(settings['common']['apache_dir']),
- ]
-
- return audits
-
-
-class ApacheConfContext(object):
- """Defines the set of key/value pairs to set in a apache config file.
-
- This context, when called, will return a dictionary containing the
- key/value pairs of setting to specify in the
- /etc/apache/conf-enabled/hardening.conf file.
- """
- def __call__(self):
- settings = utils.get_settings('apache')
- ctxt = settings['hardening']
-
- out = subprocess.check_output(['apache2', '-v'])
- ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
- out).group(1)
- ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
- ctxt['traceenable'] = settings['hardening']['traceenable']
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf
deleted file mode 100644
index e46a58a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/alias.conf
+++ /dev/null
@@ -1,31 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-<IfModule alias_module>
- #
- # Aliases: Add here as many aliases as you need (with no limit). The format is
- # Alias fakename realname
- #
- # Note that if you include a trailing / on fakename then the server will
- # require it to be present in the URL. So "/icons" isn't aliased in this
- # example, only "/icons/". If the fakename is slash-terminated, then the
- # realname must also be slash terminated, and if the fakename omits the
- # trailing slash, the realname must also omit it.
- #
- # We include the /icons/ alias for FancyIndexed directory listings. If
- # you do not use FancyIndexing, you may comment this out.
- #
- Alias /icons/ "{{ apache_icondir }}/"
-
- <Directory "{{ apache_icondir }}">
- Options -Indexes -MultiViews -FollowSymLinks
- AllowOverride None
-{% if apache_version == '2.4' -%}
- Require all granted
-{% else -%}
- Order allow,deny
- Allow from all
-{% endif %}
- </Directory>
-</IfModule>
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf
deleted file mode 100644
index 0794541..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/apache/templates/hardening.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-
-<Location / >
- <LimitExcept {{ allowed_http_methods }} >
- # http://httpd.apache.org/docs/2.4/upgrading.html
- {% if apache_version > '2.2' -%}
- Require all granted
- {% else -%}
- Order Allow,Deny
- Deny from all
- {% endif %}
- </LimitExcept>
-</Location>
-
-TraceEnable {{ traceenable }}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py
deleted file mode 100644
index 6a7057b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/__init__.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-
-class BaseAudit(object): # NO-QA
- """Base class for hardening checks.
-
- The lifecycle of a hardening check is to first check to see if the system
- is in compliance for the specified check. If it is not in compliance, the
- check method will return a value which will be supplied to the.
- """
- def __init__(self, *args, **kwargs):
- self.unless = kwargs.get('unless', None)
- super(BaseAudit, self).__init__()
-
- def ensure_compliance(self):
- """Checks to see if the current hardening check is in compliance or
- not.
-
- If the check that is performed is not in compliance, then an exception
- should be raised.
- """
- pass
-
- def _take_action(self):
- """Determines whether to perform the action or not.
-
- Checks whether or not an action should be taken. This is determined by
- the truthy value for the unless parameter. If unless is a callback
- method, it will be invoked with no parameters in order to determine
- whether or not the action should be taken. Otherwise, the truthy value
- of the unless attribute will determine if the action should be
- performed.
- """
- # Do the action if there isn't an unless override.
- if self.unless is None:
- return True
-
- # Invoke the callback if there is one.
- if hasattr(self.unless, '__call__'):
- results = self.unless()
- if results:
- return False
- else:
- return True
-
- if self.unless:
- return False
- else:
- return True
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py
deleted file mode 100644
index cf3c987..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apache.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-import subprocess
-
-from six import string_types
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
- ERROR,
-)
-
-from charmhelpers.contrib.hardening.audits import BaseAudit
-
-
-class DisabledModuleAudit(BaseAudit):
- """Audits Apache2 modules.
-
- Determines if the apache2 modules are enabled. If the modules are enabled
- then they are removed in the ensure_compliance.
- """
- def __init__(self, modules):
- if modules is None:
- self.modules = []
- elif isinstance(modules, string_types):
- self.modules = [modules]
- else:
- self.modules = modules
-
- def ensure_compliance(self):
- """Ensures that the modules are not loaded."""
- if not self.modules:
- return
-
- try:
- loaded_modules = self._get_loaded_modules()
- non_compliant_modules = []
- for module in self.modules:
- if module in loaded_modules:
- log("Module '%s' is enabled but should not be." %
- (module), level=INFO)
- non_compliant_modules.append(module)
-
- if len(non_compliant_modules) == 0:
- return
-
- for module in non_compliant_modules:
- self._disable_module(module)
- self._restart_apache()
- except subprocess.CalledProcessError as e:
- log('Error occurred auditing apache module compliance. '
- 'This may have been already reported. '
- 'Output is: %s' % e.output, level=ERROR)
-
- @staticmethod
- def _get_loaded_modules():
- """Returns the modules which are enabled in Apache."""
- output = subprocess.check_output(['apache2ctl', '-M'])
- modules = []
- for line in output.strip().split():
- # Each line of the enabled module output looks like:
- # module_name (static|shared)
- # Plus a header line at the top of the output which is stripped
- # out by the regex.
- matcher = re.search(r'^ (\S*)', line)
- if matcher:
- modules.append(matcher.group(1))
- return modules
-
- @staticmethod
- def _disable_module(module):
- """Disables the specified module in Apache."""
- try:
- subprocess.check_call(['a2dismod', module])
- except subprocess.CalledProcessError as e:
- # Note: catch error here to allow the attempt of disabling
- # multiple modules in one go rather than failing after the
- # first module fails.
- log('Error occurred disabling module %s. '
- 'Output is: %s' % (module, e.output), level=ERROR)
-
- @staticmethod
- def _restart_apache():
- """Restarts the apache process"""
- subprocess.check_output(['service', 'apache2', 'restart'])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py
deleted file mode 100644
index e94af03..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/apt.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import # required for external apt import
-from apt import apt_pkg
-from six import string_types
-
-from charmhelpers.fetch import (
- apt_cache,
- apt_purge
-)
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- WARNING,
-)
-from charmhelpers.contrib.hardening.audits import BaseAudit
-
-
-class AptConfig(BaseAudit):
-
- def __init__(self, config, **kwargs):
- self.config = config
-
- def verify_config(self):
- apt_pkg.init()
- for cfg in self.config:
- value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
- if value and value != cfg['expected']:
- log("APT config '%s' has unexpected value '%s' "
- "(expected='%s')" %
- (cfg['key'], value, cfg['expected']), level=WARNING)
-
- def ensure_compliance(self):
- self.verify_config()
-
-
-class RestrictedPackages(BaseAudit):
- """Class used to audit restricted packages on the system."""
-
- def __init__(self, pkgs, **kwargs):
- super(RestrictedPackages, self).__init__(**kwargs)
- if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
- self.pkgs = [pkgs]
- else:
- self.pkgs = pkgs
-
- def ensure_compliance(self):
- cache = apt_cache()
-
- for p in self.pkgs:
- if p not in cache:
- continue
-
- pkg = cache[p]
- if not self.is_virtual_package(pkg):
- if not pkg.current_ver:
- log("Package '%s' is not installed." % pkg.name,
- level=DEBUG)
- continue
- else:
- log("Restricted package '%s' is installed" % pkg.name,
- level=WARNING)
- self.delete_package(cache, pkg)
- else:
- log("Checking restricted virtual package '%s' provides" %
- pkg.name, level=DEBUG)
- self.delete_package(cache, pkg)
-
- def delete_package(self, cache, pkg):
- """Deletes the package from the system.
-
- Deletes the package form the system, properly handling virtual
- packages.
-
- :param cache: the apt cache
- :param pkg: the package to remove
- """
- if self.is_virtual_package(pkg):
- log("Package '%s' appears to be virtual - purging provides" %
- pkg.name, level=DEBUG)
- for _p in pkg.provides_list:
- self.delete_package(cache, _p[2].parent_pkg)
- elif not pkg.current_ver:
- log("Package '%s' not installed" % pkg.name, level=DEBUG)
- return
- else:
- log("Purging package '%s'" % pkg.name, level=DEBUG)
- apt_purge(pkg.name)
-
- def is_virtual_package(self, pkg):
- return pkg.has_provides and not pkg.has_versions
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py
deleted file mode 100644
index 0fb545a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/audits/file.py
+++ /dev/null
@@ -1,552 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import grp
-import os
-import pwd
-import re
-
-from subprocess import (
- CalledProcessError,
- check_output,
- check_call,
-)
-from traceback import format_exc
-from six import string_types
-from stat import (
- S_ISGID,
- S_ISUID
-)
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core import unitdata
-from charmhelpers.core.host import file_hash
-from charmhelpers.contrib.hardening.audits import BaseAudit
-from charmhelpers.contrib.hardening.templating import (
- get_template_path,
- render_and_write,
-)
-from charmhelpers.contrib.hardening import utils
-
-
-class BaseFileAudit(BaseAudit):
- """Base class for file audits.
-
- Provides api stubs for compliance check flow that must be used by any class
- that implemented this one.
- """
-
- def __init__(self, paths, always_comply=False, *args, **kwargs):
- """
- :param paths: string path of list of paths of files we want to apply
- compliance checks are criteria to.
- :param always_comply: if true compliance criteria is always applied
- else compliance is skipped for non-existent
- paths.
- """
- super(BaseFileAudit, self).__init__(*args, **kwargs)
- self.always_comply = always_comply
- if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
- self.paths = [paths]
- else:
- self.paths = paths
-
- def ensure_compliance(self):
- """Ensure that the all registered files comply to registered criteria.
- """
- for p in self.paths:
- if os.path.exists(p):
- if self.is_compliant(p):
- continue
-
- log('File %s is not in compliance.' % p, level=INFO)
- else:
- if not self.always_comply:
- log("Non-existent path '%s' - skipping compliance check"
- % (p), level=INFO)
- continue
-
- if self._take_action():
- log("Applying compliance criteria to '%s'" % (p), level=INFO)
- self.comply(p)
-
- def is_compliant(self, path):
- """Audits the path to see if it is compliance.
-
- :param path: the path to the file that should be checked.
- """
- raise NotImplementedError
-
- def comply(self, path):
- """Enforces the compliance of a path.
-
- :param path: the path to the file that should be enforced.
- """
- raise NotImplementedError
-
- @classmethod
- def _get_stat(cls, path):
- """Returns the Posix st_stat information for the specified file path.
-
- :param path: the path to get the st_stat information for.
- :returns: an st_stat object for the path or None if the path doesn't
- exist.
- """
- return os.stat(path)
-
-
-class FilePermissionAudit(BaseFileAudit):
- """Implements an audit for file permissions and ownership for a user.
-
- This class implements functionality that ensures that a specific user/group
- will own the file(s) specified and that the permissions specified are
- applied properly to the file.
- """
- def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
- self.user = user
- self.group = group
- self.mode = mode
- super(FilePermissionAudit, self).__init__(paths, user, group, mode,
- **kwargs)
-
- @property
- def user(self):
- return self._user
-
- @user.setter
- def user(self, name):
- try:
- user = pwd.getpwnam(name)
- except KeyError:
- log('Unknown user %s' % name, level=ERROR)
- user = None
- self._user = user
-
- @property
- def group(self):
- return self._group
-
- @group.setter
- def group(self, name):
- try:
- group = None
- if name:
- group = grp.getgrnam(name)
- else:
- group = grp.getgrgid(self.user.pw_gid)
- except KeyError:
- log('Unknown group %s' % name, level=ERROR)
- self._group = group
-
- def is_compliant(self, path):
- """Checks if the path is in compliance.
-
- Used to determine if the path specified meets the necessary
- requirements to be in compliance with the check itself.
-
- :param path: the file path to check
- :returns: True if the path is compliant, False otherwise.
- """
- stat = self._get_stat(path)
- user = self.user
- group = self.group
-
- compliant = True
- if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
- log('File %s is not owned by %s:%s.' % (path, user.pw_name,
- group.gr_name),
- level=INFO)
- compliant = False
-
- # POSIX refers to the st_mode bits as corresponding to both the
- # file type and file permission bits, where the least significant 12
- # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
- # file permission bits (8-0)
- perms = stat.st_mode & 0o7777
- if perms != self.mode:
- log('File %s has incorrect permissions, currently set to %s' %
- (path, oct(stat.st_mode & 0o7777)), level=INFO)
- compliant = False
-
- return compliant
-
- def comply(self, path):
- """Issues a chown and chmod to the file paths specified."""
- utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
- self.mode)
-
-
-class DirectoryPermissionAudit(FilePermissionAudit):
- """Performs a permission check for the specified directory path."""
-
- def __init__(self, paths, user, group=None, mode=0o600,
- recursive=True, **kwargs):
- super(DirectoryPermissionAudit, self).__init__(paths, user, group,
- mode, **kwargs)
- self.recursive = recursive
-
- def is_compliant(self, path):
- """Checks if the directory is compliant.
-
- Used to determine if the path specified and all of its children
- directories are in compliance with the check itself.
-
- :param path: the directory path to check
- :returns: True if the directory tree is compliant, otherwise False.
- """
- if not os.path.isdir(path):
- log('Path specified %s is not a directory.' % path, level=ERROR)
- raise ValueError("%s is not a directory." % path)
-
- if not self.recursive:
- return super(DirectoryPermissionAudit, self).is_compliant(path)
-
- compliant = True
- for root, dirs, _ in os.walk(path):
- if len(dirs) > 0:
- continue
-
- if not super(DirectoryPermissionAudit, self).is_compliant(root):
- compliant = False
- continue
-
- return compliant
-
- def comply(self, path):
- for root, dirs, _ in os.walk(path):
- if len(dirs) > 0:
- super(DirectoryPermissionAudit, self).comply(root)
-
-
-class ReadOnly(BaseFileAudit):
- """Audits that files and folders are read only."""
- def __init__(self, paths, *args, **kwargs):
- super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
-
- def is_compliant(self, path):
- try:
- output = check_output(['find', path, '-perm', '-go+w',
- '-type', 'f']).strip()
-
- # The find above will find any files which have permission sets
- # which allow too broad of write access. As such, the path is
- # compliant if there is no output.
- if output:
- return False
-
- return True
- except CalledProcessError as e:
- log('Error occurred checking finding writable files for %s. '
- 'Error information is: command %s failed with returncode '
- '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
- format_exc(e)), level=ERROR)
- return False
-
- def comply(self, path):
- try:
- check_output(['chmod', 'go-w', '-R', path])
- except CalledProcessError as e:
- log('Error occurred removing writeable permissions for %s. '
- 'Error information is: command %s failed with returncode '
- '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
- format_exc(e)), level=ERROR)
-
-
-class NoReadWriteForOther(BaseFileAudit):
- """Ensures that the files found under the base path are readable or
- writable by anyone other than the owner or the group.
- """
- def __init__(self, paths):
- super(NoReadWriteForOther, self).__init__(paths)
-
- def is_compliant(self, path):
- try:
- cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
- '-perm', '-o+w', '-type', 'f']
- output = check_output(cmd).strip()
-
- # The find above here will find any files which have read or
- # write permissions for other, meaning there is too broad of access
- # to read/write the file. As such, the path is compliant if there's
- # no output.
- if output:
- return False
-
- return True
- except CalledProcessError as e:
- log('Error occurred while finding files which are readable or '
- 'writable to the world in %s. '
- 'Command output is: %s.' % (path, e.output), level=ERROR)
-
- def comply(self, path):
- try:
- check_output(['chmod', '-R', 'o-rw', path])
- except CalledProcessError as e:
- log('Error occurred attempting to change modes of files under '
- 'path %s. Output of command is: %s' % (path, e.output))
-
-
-class NoSUIDSGIDAudit(BaseFileAudit):
- """Audits that specified files do not have SUID/SGID bits set."""
- def __init__(self, paths, *args, **kwargs):
- super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
-
- def is_compliant(self, path):
- stat = self._get_stat(path)
- if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
- return False
-
- return True
-
- def comply(self, path):
- try:
- log('Removing suid/sgid from %s.' % path, level=DEBUG)
- check_output(['chmod', '-s', path])
- except CalledProcessError as e:
- log('Error occurred removing suid/sgid from %s.'
- 'Error information is: command %s failed with returncode '
- '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
- format_exc(e)), level=ERROR)
-
-
-class TemplatedFile(BaseFileAudit):
- """The TemplatedFileAudit audits the contents of a templated file.
-
- This audit renders a file from a template, sets the appropriate file
- permissions, then generates a hashsum with which to check the content
- changed.
- """
- def __init__(self, path, context, template_dir, mode, user='root',
- group='root', service_actions=None, **kwargs):
- self.context = context
- self.user = user
- self.group = group
- self.mode = mode
- self.template_dir = template_dir
- self.service_actions = service_actions
- super(TemplatedFile, self).__init__(paths=path, always_comply=True,
- **kwargs)
-
- def is_compliant(self, path):
- """Determines if the templated file is compliant.
-
- A templated file is only compliant if it has not changed (as
- determined by its sha256 hashsum) AND its file permissions are set
- appropriately.
-
- :param path: the path to check compliance.
- """
- same_templates = self.templates_match(path)
- same_content = self.contents_match(path)
- same_permissions = self.permissions_match(path)
-
- if same_content and same_permissions and same_templates:
- return True
-
- return False
-
- def run_service_actions(self):
- """Run any actions on services requested."""
- if not self.service_actions:
- return
-
- for svc_action in self.service_actions:
- name = svc_action['service']
- actions = svc_action['actions']
- log("Running service '%s' actions '%s'" % (name, actions),
- level=DEBUG)
- for action in actions:
- cmd = ['service', name, action]
- try:
- check_call(cmd)
- except CalledProcessError as exc:
- log("Service name='%s' action='%s' failed - %s" %
- (name, action, exc), level=WARNING)
-
- def comply(self, path):
- """Ensures the contents and the permissions of the file.
-
- :param path: the path to correct
- """
- dirname = os.path.dirname(path)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- self.pre_write()
- render_and_write(self.template_dir, path, self.context())
- utils.ensure_permissions(path, self.user, self.group, self.mode)
- self.run_service_actions()
- self.save_checksum(path)
- self.post_write()
-
- def pre_write(self):
- """Invoked prior to writing the template."""
- pass
-
- def post_write(self):
- """Invoked after writing the template."""
- pass
-
- def templates_match(self, path):
- """Determines if the template files are the same.
-
- The template file equality is determined by the hashsum of the
- template files themselves. If there is no hashsum, then the content
- cannot be sure to be the same so treat it as if they changed.
- Otherwise, return whether or not the hashsums are the same.
-
- :param path: the path to check
- :returns: boolean
- """
- template_path = get_template_path(self.template_dir, path)
- key = 'hardening:template:%s' % template_path
- template_checksum = file_hash(template_path)
- kv = unitdata.kv()
- stored_tmplt_checksum = kv.get(key)
- if not stored_tmplt_checksum:
- kv.set(key, template_checksum)
- kv.flush()
- log('Saved template checksum for %s.' % template_path,
- level=DEBUG)
- # Since we don't have a template checksum, then assume it doesn't
- # match and return that the template is different.
- return False
- elif stored_tmplt_checksum != template_checksum:
- kv.set(key, template_checksum)
- kv.flush()
- log('Updated template checksum for %s.' % template_path,
- level=DEBUG)
- return False
-
- # Here the template hasn't changed based upon the calculated
- # checksum of the template and what was previously stored.
- return True
-
- def contents_match(self, path):
- """Determines if the file content is the same.
-
- This is determined by comparing hashsum of the file contents and
- the saved hashsum. If there is no hashsum, then the content cannot
- be sure to be the same so treat them as if they are not the same.
- Otherwise, return True if the hashsums are the same, False if they
- are not the same.
-
- :param path: the file to check.
- """
- checksum = file_hash(path)
-
- kv = unitdata.kv()
- stored_checksum = kv.get('hardening:%s' % path)
- if not stored_checksum:
- # If the checksum hasn't been generated, return False to ensure
- # the file is written and the checksum stored.
- log('Checksum for %s has not been calculated.' % path, level=DEBUG)
- return False
- elif stored_checksum != checksum:
- log('Checksum mismatch for %s.' % path, level=DEBUG)
- return False
-
- return True
-
- def permissions_match(self, path):
- """Determines if the file owner and permissions match.
-
- :param path: the path to check.
- """
- audit = FilePermissionAudit(path, self.user, self.group, self.mode)
- return audit.is_compliant(path)
-
- def save_checksum(self, path):
- """Calculates and saves the checksum for the path specified.
-
- :param path: the path of the file to save the checksum.
- """
- checksum = file_hash(path)
- kv = unitdata.kv()
- kv.set('hardening:%s' % path, checksum)
- kv.flush()
-
-
-class DeletedFile(BaseFileAudit):
- """Audit to ensure that a file is deleted."""
- def __init__(self, paths):
- super(DeletedFile, self).__init__(paths)
-
- def is_compliant(self, path):
- return not os.path.exists(path)
-
- def comply(self, path):
- os.remove(path)
-
-
-class FileContentAudit(BaseFileAudit):
- """Audit the contents of a file."""
- def __init__(self, paths, cases, **kwargs):
- # Cases we expect to pass
- self.pass_cases = cases.get('pass', [])
- # Cases we expect to fail
- self.fail_cases = cases.get('fail', [])
- super(FileContentAudit, self).__init__(paths, **kwargs)
-
- def is_compliant(self, path):
- """
- Given a set of content matching cases i.e. tuple(regex, bool) where
- bool value denotes whether or not regex is expected to match, check that
- all cases match as expected with the contents of the file. Cases can be
- expected to pass of fail.
-
- :param path: Path of file to check.
- :returns: Boolean value representing whether or not all cases are
- found to be compliant.
- """
- log("Auditing contents of file '%s'" % (path), level=DEBUG)
- with open(path, 'r') as fd:
- contents = fd.read()
-
- matches = 0
- for pattern in self.pass_cases:
- key = re.compile(pattern, flags=re.MULTILINE)
- results = re.search(key, contents)
- if results:
- matches += 1
- else:
- log("Pattern '%s' was expected to pass but instead it failed"
- % (pattern), level=WARNING)
-
- for pattern in self.fail_cases:
- key = re.compile(pattern, flags=re.MULTILINE)
- results = re.search(key, contents)
- if not results:
- matches += 1
- else:
- log("Pattern '%s' was expected to fail but instead it passed"
- % (pattern), level=WARNING)
-
- total = len(self.pass_cases) + len(self.fail_cases)
- log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
- return matches == total
-
- def comply(self, *args, **kwargs):
- """NOOP since we just issue warnings. This is to avoid the
- NotImplememtedError.
- """
- log("Not applying any compliance criteria, only checks.", level=INFO)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml
deleted file mode 100644
index e5ada29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-# NOTE: this file contains the default configuration for the 'apache' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'apache' as the root key followed by any of the following with new
-# values.
-
-common:
- apache_dir: '/etc/apache2'
-
-hardening:
- traceenable: 'off'
- allowed_http_methods: "GET POST"
- modules_to_disable: [ cgi, cgid ] \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
deleted file mode 100644
index 227589b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
+++ /dev/null
@@ -1,9 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-common:
- apache_dir:
- traceenable:
-
-hardening:
- allowed_http_methods:
- modules_to_disable:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml
deleted file mode 100644
index 682d22b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# NOTE: this file contains the default configuration for the 'mysql' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'mysql' as the root key followed by any of the following with new
-# values.
-
-hardening:
- mysql-conf: /etc/mysql/my.cnf
- hardening-conf: /etc/mysql/conf.d/hardening.cnf
-
-security:
- # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
- chroot: None
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
- safe-user-create: 1
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
- secure-auth: 1
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
- skip-symbolic-links: 1
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
- skip-show-database: True
-
- # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
- local-infile: 0
-
- # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
- allow-suspicious-udfs: 0
-
- # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
- automatic-sp-privileges: 0
-
- # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
- secure-file-priv: /tmp
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
deleted file mode 100644
index 2edf325..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
+++ /dev/null
@@ -1,15 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-hardening:
- mysql-conf:
- hardening-conf:
-security:
- chroot:
- safe-user-create:
- secure-auth:
- skip-symbolic-links:
- skip-show-database:
- local-infile:
- allow-suspicious-udfs:
- automatic-sp-privileges:
- secure-file-priv:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml
deleted file mode 100644
index ddd4286..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-# NOTE: this file contains the default configuration for the 'os' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'os' as the root key followed by any of the following with new
-# values.
-
-general:
- desktop_enable: False # (type:boolean)
-
-environment:
- extra_user_paths: []
- umask: 027
- root_path: /
-
-auth:
- pw_max_age: 60
- # discourage password cycling
- pw_min_age: 7
- retries: 5
- lockout_time: 600
- timeout: 60
- allow_homeless: False # (type:boolean)
- pam_passwdqc_enable: True # (type:boolean)
- pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
- root_ttys:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
- uid_min: 1000
- gid_min: 1000
- sys_uid_min: 100
- sys_uid_max: 999
- sys_gid_min: 100
- sys_gid_max: 999
- chfn_restrict:
-
-security:
- users_allow: []
- suid_sgid_enforce: True # (type:boolean)
- # user-defined blacklist and whitelist
- suid_sgid_blacklist: []
- suid_sgid_whitelist: []
- # if this is True, remove any suid/sgid bits from files that were not in the whitelist
- suid_sgid_dry_run_on_unknown: False # (type:boolean)
- suid_sgid_remove_from_unknown: False # (type:boolean)
- # remove packages with known issues
- packages_clean: True # (type:boolean)
- packages_list:
- xinetd
- inetd
- ypserv
- telnet-server
- rsh-server
- rsync
- kernel_enable_module_loading: True # (type:boolean)
- kernel_enable_core_dump: False # (type:boolean)
-
-sysctl:
- kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
- kernel_enable_sysrq: False # (type:boolean)
- forwarding: False # (type:boolean)
- ipv6_enable: False # (type:boolean)
- arp_restricted: True # (type:boolean)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema
deleted file mode 100644
index 88b3966..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/os.yaml.schema
+++ /dev/null
@@ -1,42 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-general:
- desktop_enable:
-environment:
- extra_user_paths:
- umask:
- root_path:
-auth:
- pw_max_age:
- pw_min_age:
- retries:
- lockout_time:
- timeout:
- allow_homeless:
- pam_passwdqc_enable:
- pam_passwdqc_options:
- root_ttys:
- uid_min:
- gid_min:
- sys_uid_min:
- sys_uid_max:
- sys_gid_min:
- sys_gid_max:
- chfn_restrict:
-security:
- users_allow:
- suid_sgid_enforce:
- suid_sgid_blacklist:
- suid_sgid_whitelist:
- suid_sgid_dry_run_on_unknown:
- suid_sgid_remove_from_unknown:
- packages_clean:
- packages_list:
- kernel_enable_module_loading:
- kernel_enable_core_dump:
-sysctl:
- kernel_secure_sysrq:
- kernel_enable_sysrq:
- forwarding:
- ipv6_enable:
- arp_restricted:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml
deleted file mode 100644
index cd529bc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
-# NOTE: this file contains the default configuration for the 'ssh' hardening
-# code. If you want to override any settings you must add them to a file
-# called hardening.yaml in the root directory of your charm using the
-# name 'ssh' as the root key followed by any of the following with new
-# values.
-
-common:
- service_name: 'ssh'
- network_ipv6_enable: False # (type:boolean)
- ports: [22]
- remote_hosts: []
-
-client:
- package: 'openssh-client'
- cbc_required: False # (type:boolean)
- weak_hmac: False # (type:boolean)
- weak_kex: False # (type:boolean)
- roaming: False
- password_authentication: 'no'
-
-server:
- host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
- '/etc/ssh/ssh_host_ecdsa_key']
- cbc_required: False # (type:boolean)
- weak_hmac: False # (type:boolean)
- weak_kex: False # (type:boolean)
- allow_root_with_key: False # (type:boolean)
- allow_tcp_forwarding: 'no'
- allow_agent_forwarding: 'no'
- allow_x11_forwarding: 'no'
- use_privilege_separation: 'sandbox'
- listen_to: ['0.0.0.0']
- use_pam: 'no'
- package: 'openssh-server'
- password_authentication: 'no'
- alive_interval: '600'
- alive_count: '3'
- sftp_enable: False # (type:boolean)
- sftp_group: 'sftponly'
- sftp_chroot: '/home/%u'
- deny_users: []
- allow_users: []
- deny_groups: []
- allow_groups: []
- print_motd: 'no'
- print_last_log: 'no'
- use_dns: 'no'
- max_auth_tries: 2
- max_sessions: 10
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
deleted file mode 100644
index d05e054..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
+++ /dev/null
@@ -1,42 +0,0 @@
-# NOTE: this schema must contain all valid keys from it's associated defaults
-# file. It is used to validate user-provided overrides.
-common:
- service_name:
- network_ipv6_enable:
- ports:
- remote_hosts:
-client:
- package:
- cbc_required:
- weak_hmac:
- weak_kex:
- roaming:
- password_authentication:
-server:
- host_key_files:
- cbc_required:
- weak_hmac:
- weak_kex:
- allow_root_with_key:
- allow_tcp_forwarding:
- allow_agent_forwarding:
- allow_x11_forwarding:
- use_privilege_separation:
- listen_to:
- use_pam:
- package:
- password_authentication:
- alive_interval:
- alive_count:
- sftp_enable:
- sftp_group:
- sftp_chroot:
- deny_users:
- allow_users:
- deny_groups:
- allow_groups:
- print_motd:
- print_last_log:
- use_dns:
- max_auth_tries:
- max_sessions:
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py
deleted file mode 100644
index ac7568d..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/harden.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-from collections import OrderedDict
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- DEBUG,
- WARNING,
-)
-from charmhelpers.contrib.hardening.host.checks import run_os_checks
-from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
-from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
-from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
-
-
-def harden(overrides=None):
- """Hardening decorator.
-
- This is the main entry point for running the hardening stack. In order to
- run modules of the stack you must add this decorator to charm hook(s) and
- ensure that your charm config.yaml contains the 'harden' option set to
- one or more of the supported modules. Setting these will cause the
- corresponding hardening code to be run when the hook fires.
-
- This decorator can and should be applied to more than one hook or function
- such that hardening modules are called multiple times. This is because
- subsequent calls will perform auditing checks that will report any changes
- to resources hardened by the first run (and possibly perform compliance
- actions as a result of any detected infractions).
-
- :param overrides: Optional list of stack modules used to override those
- provided with 'harden' config.
- :returns: Returns value returned by decorated function once executed.
- """
- def _harden_inner1(f):
- log("Hardening function '%s'" % (f.__name__), level=DEBUG)
-
- def _harden_inner2(*args, **kwargs):
- RUN_CATALOG = OrderedDict([('os', run_os_checks),
- ('ssh', run_ssh_checks),
- ('mysql', run_mysql_checks),
- ('apache', run_apache_checks)])
-
- enabled = overrides or (config("harden") or "").split()
- if enabled:
- modules_to_run = []
- # modules will always be performed in the following order
- for module, func in six.iteritems(RUN_CATALOG):
- if module in enabled:
- enabled.remove(module)
- modules_to_run.append(func)
-
- if enabled:
- log("Unknown hardening modules '%s' - ignoring" %
- (', '.join(enabled)), level=WARNING)
-
- for hardener in modules_to_run:
- log("Executing hardening module '%s'" %
- (hardener.__name__), level=DEBUG)
- hardener()
- else:
- log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
-
- return f(*args, **kwargs)
- return _harden_inner2
-
- return _harden_inner1
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py
deleted file mode 100644
index c3bd598..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.host.checks import (
- apt,
- limits,
- login,
- minimize_access,
- pam,
- profile,
- securetty,
- suid_sgid,
- sysctl
-)
-
-
-def run_os_checks():
- log("Starting OS hardening checks.", level=DEBUG)
- checks = apt.get_audits()
- checks.extend(limits.get_audits())
- checks.extend(login.get_audits())
- checks.extend(minimize_access.get_audits())
- checks.extend(pam.get_audits())
- checks.extend(profile.get_audits())
- checks.extend(securetty.get_audits())
- checks.extend(suid_sgid.get_audits())
- checks.extend(sysctl.get_audits())
-
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("OS hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py
deleted file mode 100644
index 2c221cd..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/apt.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.utils import get_settings
-from charmhelpers.contrib.hardening.audits.apt import (
- AptConfig,
- RestrictedPackages,
-)
-
-
-def get_audits():
- """Get OS hardening apt audits.
-
- :returns: dictionary of audits
- """
- audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
- 'expected': 'false'}])]
-
- settings = get_settings('os')
- clean_packages = settings['security']['packages_clean']
- if clean_packages:
- security_packages = settings['security']['packages_list']
- if security_packages:
- audits.append(RestrictedPackages(security_packages))
-
- return audits
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py
deleted file mode 100644
index 8ce9dc2..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/limits.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import (
- DirectoryPermissionAudit,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening security limits audits.
-
- :returns: dictionary of audits
- """
- audits = []
- settings = utils.get_settings('os')
-
- # Ensure that the /etc/security/limits.d directory is only writable
- # by the root user, but others can execute and read.
- audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
- user='root', group='root',
- mode=0o755))
-
- # If core dumps are not enabled, then don't allow core dumps to be
- # created as they may contain sensitive information.
- if not settings['security']['kernel_enable_core_dump']:
- audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
- SecurityLimitsContext(),
- template_dir=TEMPLATES_DIR,
- user='root', group='root', mode=0o0440))
- return audits
-
-
-class SecurityLimitsContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
- ctxt = {'disable_core_dump':
- not settings['security']['kernel_enable_core_dump']}
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py
deleted file mode 100644
index d32c4f6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/login.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from six import string_types
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening login.defs audits.
-
- :returns: dictionary of audits
- """
- audits = [TemplatedFile('/etc/login.defs', LoginContext(),
- template_dir=TEMPLATES_DIR,
- user='root', group='root', mode=0o0444)]
- return audits
-
-
-class LoginContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
-
- # Octal numbers in yaml end up being turned into decimal,
- # so check if the umask is entered as a string (e.g. '027')
- # or as an octal umask as we know it (e.g. 002). If its not
- # a string assume it to be octal and turn it into an octal
- # string.
- umask = settings['environment']['umask']
- if not isinstance(umask, string_types):
- umask = '%s' % oct(umask)
-
- ctxt = {
- 'additional_user_paths':
- settings['environment']['extra_user_paths'],
- 'umask': umask,
- 'pwd_max_age': settings['auth']['pw_max_age'],
- 'pwd_min_age': settings['auth']['pw_min_age'],
- 'uid_min': settings['auth']['uid_min'],
- 'sys_uid_min': settings['auth']['sys_uid_min'],
- 'sys_uid_max': settings['auth']['sys_uid_max'],
- 'gid_min': settings['auth']['gid_min'],
- 'sys_gid_min': settings['auth']['sys_gid_min'],
- 'sys_gid_max': settings['auth']['sys_gid_max'],
- 'login_retries': settings['auth']['retries'],
- 'login_timeout': settings['auth']['timeout'],
- 'chfn_restrict': settings['auth']['chfn_restrict'],
- 'allow_login_without_home': settings['auth']['allow_homeless']
- }
-
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py
deleted file mode 100644
index c471064..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/minimize_access.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- ReadOnly,
-)
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening access audits.
-
- :returns: dictionary of audits
- """
- audits = []
- settings = utils.get_settings('os')
-
- # Remove write permissions from $PATH folders for all regular users.
- # This prevents changing system-wide commands from normal users.
- path_folders = {'/usr/local/sbin',
- '/usr/local/bin',
- '/usr/sbin',
- '/usr/bin',
- '/bin'}
- extra_user_paths = settings['environment']['extra_user_paths']
- path_folders.update(extra_user_paths)
- audits.append(ReadOnly(path_folders))
-
- # Only allow the root user to have access to the shadow file.
- audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
-
- if 'change_user' not in settings['security']['users_allow']:
- # su should only be accessible to user and group root, unless it is
- # expressly defined to allow users to change to root via the
- # security_users_allow config option.
- audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
-
- return audits
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py
deleted file mode 100644
index 383fe28..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/pam.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- check_output,
- CalledProcessError,
-)
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-from charmhelpers.fetch import (
- apt_install,
- apt_purge,
- apt_update,
-)
-from charmhelpers.contrib.hardening.audits.file import (
- TemplatedFile,
- DeletedFile,
-)
-from charmhelpers.contrib.hardening import utils
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-
-
-def get_audits():
- """Get OS hardening PAM authentication audits.
-
- :returns: dictionary of audits
- """
- audits = []
-
- settings = utils.get_settings('os')
-
- if settings['auth']['pam_passwdqc_enable']:
- audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
-
- if settings['auth']['retries']:
- audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
- else:
- audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
-
- return audits
-
-
-class PasswdqcPAMContext(object):
-
- def __call__(self):
- ctxt = {}
- settings = utils.get_settings('os')
-
- ctxt['auth_pam_passwdqc_options'] = \
- settings['auth']['pam_passwdqc_options']
-
- return ctxt
-
-
-class PasswdqcPAM(TemplatedFile):
- """The PAM Audit verifies the linux PAM settings."""
- def __init__(self, path):
- super(PasswdqcPAM, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=PasswdqcPAMContext(),
- user='root',
- group='root',
- mode=0o0640)
-
- def pre_write(self):
- # Always remove?
- for pkg in ['libpam-ccreds', 'libpam-cracklib']:
- log("Purging package '%s'" % pkg, level=DEBUG),
- apt_purge(pkg)
-
- apt_update(fatal=True)
- for pkg in ['libpam-passwdqc']:
- log("Installing package '%s'" % pkg, level=DEBUG),
- apt_install(pkg)
-
- def post_write(self):
- """Updates the PAM configuration after the file has been written"""
- try:
- check_output(['pam-auth-update', '--package'])
- except CalledProcessError as e:
- log('Error calling pam-auth-update: %s' % e, level=ERROR)
-
-
-class Tally2PAMContext(object):
-
- def __call__(self):
- ctxt = {}
- settings = utils.get_settings('os')
-
- ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
- ctxt['auth_retries'] = settings['auth']['retries']
-
- return ctxt
-
-
-class Tally2PAM(TemplatedFile):
- """The PAM Audit verifies the linux PAM settings."""
- def __init__(self, path):
- super(Tally2PAM, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=Tally2PAMContext(),
- user='root',
- group='root',
- mode=0o0640)
-
- def pre_write(self):
- # Always remove?
- apt_purge('libpam-ccreds')
- apt_update(fatal=True)
- apt_install('libpam-modules')
-
- def post_write(self):
- """Updates the PAM configuration after the file has been written"""
- try:
- check_output(['pam-auth-update', '--package'])
- except CalledProcessError as e:
- log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py
deleted file mode 100644
index f744335..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/profile.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening profile audits.
-
- :returns: dictionary of audits
- """
- audits = []
-
- settings = utils.get_settings('os')
-
- # If core dumps are not enabled, then don't allow core dumps to be
- # created as they may contain sensitive information.
- if not settings['security']['kernel_enable_core_dump']:
- audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
- ProfileContext(),
- template_dir=TEMPLATES_DIR,
- mode=0o0755, user='root', group='root'))
- return audits
-
-
-class ProfileContext(object):
-
- def __call__(self):
- ctxt = {}
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py
deleted file mode 100644
index e33c73c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/securetty.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.contrib.hardening.audits.file import TemplatedFile
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get OS hardening Secure TTY audits.
-
- :returns: dictionary of audits
- """
- audits = []
- audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
- template_dir=TEMPLATES_DIR,
- mode=0o0400, user='root', group='root'))
- return audits
-
-
-class SecureTTYContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
- ctxt = {'ttys': settings['auth']['root_ttys']}
- return ctxt
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
deleted file mode 100644
index 0534689..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
-from charmhelpers.contrib.hardening import utils
-
-
-BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
- '/usr/libexec/openssh/ssh-keysign',
- '/usr/lib/openssh/ssh-keysign',
- '/sbin/netreport',
- '/usr/sbin/usernetctl',
- '/usr/sbin/userisdnctl',
- '/usr/sbin/pppd',
- '/usr/bin/lockfile',
- '/usr/bin/mail-lock',
- '/usr/bin/mail-unlock',
- '/usr/bin/mail-touchlock',
- '/usr/bin/dotlockfile',
- '/usr/bin/arping',
- '/usr/sbin/uuidd',
- '/usr/bin/mtr',
- '/usr/lib/evolution/camel-lock-helper-1.2',
- '/usr/lib/pt_chown',
- '/usr/lib/eject/dmcrypt-get-device',
- '/usr/lib/mc/cons.saver']
-
-WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
- '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
- '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
- '/usr/bin/passwd', '/usr/bin/ssh-agent',
- '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
- '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
- '/bin/ping6', '/usr/bin/traceroute6.iputils',
- '/sbin/mount.nfs', '/sbin/umount.nfs',
- '/sbin/mount.nfs4', '/sbin/umount.nfs4',
- '/usr/bin/crontab',
- '/usr/bin/wall', '/usr/bin/write',
- '/usr/bin/screen',
- '/usr/bin/mlocate',
- '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
- '/bin/fusermount',
- '/usr/bin/pkexec',
- '/usr/bin/sudo', '/usr/bin/sudoedit',
- '/usr/sbin/postdrop', '/usr/sbin/postqueue',
- '/usr/sbin/suexec',
- '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
- '/usr/kerberos/bin/ksu',
- '/usr/sbin/ccreds_validate',
- '/usr/bin/Xorg',
- '/usr/bin/X',
- '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
- '/usr/lib/vte/gnome-pty-helper',
- '/usr/lib/libvte9/gnome-pty-helper',
- '/usr/lib/libvte-2.90-9/gnome-pty-helper']
-
-
-def get_audits():
- """Get OS hardening suid/sgid audits.
-
- :returns: dictionary of audits
- """
- checks = []
- settings = utils.get_settings('os')
- if not settings['security']['suid_sgid_enforce']:
- log("Skipping suid/sgid hardening", level=INFO)
- return checks
-
- # Build the blacklist and whitelist of files for suid/sgid checks.
- # There are a total of 4 lists:
- # 1. the system blacklist
- # 2. the system whitelist
- # 3. the user blacklist
- # 4. the user whitelist
- #
- # The blacklist is the set of paths which should NOT have the suid/sgid bit
- # set and the whitelist is the set of paths which MAY have the suid/sgid
- # bit setl. The user whitelist/blacklist effectively override the system
- # whitelist/blacklist.
- u_b = settings['security']['suid_sgid_blacklist']
- u_w = settings['security']['suid_sgid_whitelist']
-
- blacklist = set(BLACKLIST) - set(u_w + u_b)
- whitelist = set(WHITELIST) - set(u_b + u_w)
-
- checks.append(NoSUIDSGIDAudit(blacklist))
-
- dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
-
- if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
- # If the policy is a dry_run (e.g. complain only) or remove unknown
- # suid/sgid bits then find all of the paths which have the suid/sgid
- # bit set and then remove the whitelisted paths.
- root_path = settings['environment']['root_path']
- unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
- checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
-
- return checks
-
-
-def find_paths_with_suid_sgid(root_path):
- """Finds all paths/files which have an suid/sgid bit enabled.
-
- Starting with the root_path, this will recursively find all paths which
- have an suid or sgid bit set.
- """
- cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
- '-type', 'f', '!', '-path', '/proc/*', '-print']
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, _ = p.communicate()
- return set(out.split('\n'))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py
deleted file mode 100644
index 4a76d74..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/checks/sysctl.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import platform
-import re
-import six
-import subprocess
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
- WARNING,
-)
-from charmhelpers.contrib.hardening import utils
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
-
-
-SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
-net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
-net.ipv4.conf.all.rp_filter=1
-net.ipv4.conf.default.rp_filter=1
-net.ipv4.icmp_echo_ignore_broadcasts=1
-net.ipv4.icmp_ignore_bogus_error_responses=1
-net.ipv4.icmp_ratelimit=100
-net.ipv4.icmp_ratemask=88089
-net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
-net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
-net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
-net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
-net.ipv4.tcp_rfc1337=1
-net.ipv4.tcp_syncookies=1
-net.ipv4.conf.all.shared_media=1
-net.ipv4.conf.default.shared_media=1
-net.ipv4.conf.all.accept_source_route=0
-net.ipv4.conf.default.accept_source_route=0
-net.ipv4.conf.all.accept_redirects=0
-net.ipv4.conf.default.accept_redirects=0
-net.ipv6.conf.all.accept_redirects=0
-net.ipv6.conf.default.accept_redirects=0
-net.ipv4.conf.all.secure_redirects=0
-net.ipv4.conf.default.secure_redirects=0
-net.ipv4.conf.all.send_redirects=0
-net.ipv4.conf.default.send_redirects=0
-net.ipv4.conf.all.log_martians=0
-net.ipv6.conf.default.router_solicitations=0
-net.ipv6.conf.default.accept_ra_rtr_pref=0
-net.ipv6.conf.default.accept_ra_pinfo=0
-net.ipv6.conf.default.accept_ra_defrtr=0
-net.ipv6.conf.default.autoconf=0
-net.ipv6.conf.default.dad_transmits=0
-net.ipv6.conf.default.max_addresses=1
-net.ipv6.conf.all.accept_ra=0
-net.ipv6.conf.default.accept_ra=0
-kernel.modules_disabled=%(kernel_modules_disabled)s
-kernel.sysrq=%(kernel_sysrq)s
-fs.suid_dumpable=%(fs_suid_dumpable)s
-kernel.randomize_va_space=2
-"""
-
-
-def get_audits():
- """Get OS hardening sysctl audits.
-
- :returns: dictionary of audits
- """
- audits = []
- settings = utils.get_settings('os')
-
- # Apply the sysctl settings which are configured to be applied.
- audits.append(SysctlConf())
- # Make sure that only root has access to the sysctl.conf file, and
- # that it is read-only.
- audits.append(FilePermissionAudit('/etc/sysctl.conf',
- user='root',
- group='root', mode=0o0440))
- # If module loading is not enabled, then ensure that the modules
- # file has the appropriate permissions and rebuild the initramfs
- if not settings['security']['kernel_enable_module_loading']:
- audits.append(ModulesTemplate())
-
- return audits
-
-
-class ModulesContext(object):
-
- def __call__(self):
- settings = utils.get_settings('os')
- with open('/proc/cpuinfo', 'r') as fd:
- cpuinfo = fd.readlines()
-
- for line in cpuinfo:
- match = re.search(r"^vendor_id\s+:\s+(.+)", line)
- if match:
- vendor = match.group(1)
-
- if vendor == "GenuineIntel":
- vendor = "intel"
- elif vendor == "AuthenticAMD":
- vendor = "amd"
-
- ctxt = {'arch': platform.processor(),
- 'cpuVendor': vendor,
- 'desktop_enable': settings['general']['desktop_enable']}
-
- return ctxt
-
-
-class ModulesTemplate(object):
-
- def __init__(self):
- super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
- ModulesContext(),
- templates_dir=TEMPLATES_DIR,
- user='root', group='root',
- mode=0o0440)
-
- def post_write(self):
- subprocess.check_call(['update-initramfs', '-u'])
-
-
-class SysCtlHardeningContext(object):
- def __call__(self):
- settings = utils.get_settings('os')
- ctxt = {'sysctl': {}}
-
- log("Applying sysctl settings", level=INFO)
- extras = {'net_ipv4_ip_forward': 0,
- 'net_ipv6_conf_all_forwarding': 0,
- 'net_ipv6_conf_all_disable_ipv6': 1,
- 'net_ipv4_tcp_timestamps': 0,
- 'net_ipv4_conf_all_arp_ignore': 0,
- 'net_ipv4_conf_all_arp_announce': 0,
- 'kernel_sysrq': 0,
- 'fs_suid_dumpable': 0,
- 'kernel_modules_disabled': 1}
-
- if settings['sysctl']['ipv6_enable']:
- extras['net_ipv6_conf_all_disable_ipv6'] = 0
-
- if settings['sysctl']['forwarding']:
- extras['net_ipv4_ip_forward'] = 1
- extras['net_ipv6_conf_all_forwarding'] = 1
-
- if settings['sysctl']['arp_restricted']:
- extras['net_ipv4_conf_all_arp_ignore'] = 1
- extras['net_ipv4_conf_all_arp_announce'] = 2
-
- if settings['security']['kernel_enable_module_loading']:
- extras['kernel_modules_disabled'] = 0
-
- if settings['sysctl']['kernel_enable_sysrq']:
- sysrq_val = settings['sysctl']['kernel_secure_sysrq']
- extras['kernel_sysrq'] = sysrq_val
-
- if settings['security']['kernel_enable_core_dump']:
- extras['fs_suid_dumpable'] = 1
-
- settings.update(extras)
- for d in (SYSCTL_DEFAULTS % settings).split():
- d = d.strip().partition('=')
- key = d[0].strip()
- path = os.path.join('/proc/sys', key.replace('.', '/'))
- if not os.path.exists(path):
- log("Skipping '%s' since '%s' does not exist" % (key, path),
- level=WARNING)
- continue
-
- ctxt['sysctl'][key] = d[2] or None
-
- # Translate for python3
- return {'sysctl_settings':
- [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
-
-
-class SysctlConf(TemplatedFile):
- """An audit check for sysctl settings."""
- def __init__(self):
- self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
- super(SysctlConf, self).__init__(self.conffile,
- SysCtlHardeningContext(),
- template_dir=TEMPLATES_DIR,
- user='root', group='root',
- mode=0o0440)
-
- def post_write(self):
- try:
- subprocess.check_call(['sysctl', '-p', self.conffile])
- except subprocess.CalledProcessError as e:
- # NOTE: on some systems if sysctl cannot apply all settings it
- # will return non-zero as well.
- log("sysctl command returned an error (maybe some "
- "keys could not be set) - %s" % (e),
- level=WARNING)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
deleted file mode 100644
index 0014191..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-{% if disable_core_dump -%}
-# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
-* hard core 0
-{% endif %} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
deleted file mode 100644
index 101f1e1..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-{% for key, value in sysctl_settings -%}
-{{ key }}={{ value }}
-{% endfor -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs
deleted file mode 100644
index db137d6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/login.defs
+++ /dev/null
@@ -1,349 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-#
-# /etc/login.defs - Configuration control definitions for the login package.
-#
-# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
-# If unspecified, some arbitrary (and possibly incorrect) value will
-# be assumed. All other items are optional - if not specified then
-# the described action or option will be inhibited.
-#
-# Comment lines (lines beginning with "#") and blank lines are ignored.
-#
-# Modified for Linux. --marekm
-
-# REQUIRED for useradd/userdel/usermod
-# Directory where mailboxes reside, _or_ name of file, relative to the
-# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
-# MAIL_DIR takes precedence.
-#
-# Essentially:
-# - MAIL_DIR defines the location of users mail spool files
-# (for mbox use) by appending the username to MAIL_DIR as defined
-# below.
-# - MAIL_FILE defines the location of the users mail spool files as the
-# fully-qualified filename obtained by prepending the user home
-# directory before $MAIL_FILE
-#
-# NOTE: This is no more used for setting up users MAIL environment variable
-# which is, starting from shadow 4.0.12-1 in Debian, entirely the
-# job of the pam_mail PAM modules
-# See default PAM configuration files provided for
-# login, su, etc.
-#
-# This is a temporary situation: setting these variables will soon
-# move to /etc/default/useradd and the variables will then be
-# no more supported
-MAIL_DIR /var/mail
-#MAIL_FILE .mail
-
-#
-# Enable logging and display of /var/log/faillog login failure info.
-# This option conflicts with the pam_tally PAM module.
-#
-FAILLOG_ENAB yes
-
-#
-# Enable display of unknown usernames when login failures are recorded.
-#
-# WARNING: Unknown usernames may become world readable.
-# See #290803 and #298773 for details about how this could become a security
-# concern
-LOG_UNKFAIL_ENAB no
-
-#
-# Enable logging of successful logins
-#
-LOG_OK_LOGINS yes
-
-#
-# Enable "syslog" logging of su activity - in addition to sulog file logging.
-# SYSLOG_SG_ENAB does the same for newgrp and sg.
-#
-SYSLOG_SU_ENAB yes
-SYSLOG_SG_ENAB yes
-
-#
-# If defined, all su activity is logged to this file.
-#
-#SULOG_FILE /var/log/sulog
-
-#
-# If defined, file which maps tty line to TERM environment parameter.
-# Each line of the file is in a format something like "vt100 tty01".
-#
-#TTYTYPE_FILE /etc/ttytype
-
-#
-# If defined, login failures will be logged here in a utmp format
-# last, when invoked as lastb, will read /var/log/btmp, so...
-#
-FTMP_FILE /var/log/btmp
-
-#
-# If defined, the command name to display when running "su -". For
-# example, if this is defined as "su" then a "ps" will display the
-# command is "-su". If not defined, then "ps" would display the
-# name of the shell actually being run, e.g. something like "-sh".
-#
-SU_NAME su
-
-#
-# If defined, file which inhibits all the usual chatter during the login
-# sequence. If a full pathname, then hushed mode will be enabled if the
-# user's name or shell are found in the file. If not a full pathname, then
-# hushed mode will be enabled if the file exists in the user's home directory.
-#
-HUSHLOGIN_FILE .hushlogin
-#HUSHLOGIN_FILE /etc/hushlogins
-
-#
-# *REQUIRED* The default PATH settings, for superuser and normal users.
-#
-# (they are minimal, add the rest in the shell startup files)
-ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
-
-#
-# Terminal permissions
-#
-# TTYGROUP Login tty will be assigned this group ownership.
-# TTYPERM Login tty will be set to this permission.
-#
-# If you have a "write" program which is "setgid" to a special group
-# which owns the terminals, define TTYGROUP to the group number and
-# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
-# TTYPERM to either 622 or 600.
-#
-# In Debian /usr/bin/bsd-write or similar programs are setgid tty
-# However, the default and recommended value for TTYPERM is still 0600
-# to not allow anyone to write to anyone else console or terminal
-
-# Users can still allow other people to write them by issuing
-# the "mesg y" command.
-
-TTYGROUP tty
-TTYPERM 0600
-
-#
-# Login configuration initializations:
-#
-# ERASECHAR Terminal ERASE character ('\010' = backspace).
-# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
-# UMASK Default "umask" value.
-#
-# The ERASECHAR and KILLCHAR are used only on System V machines.
-#
-# UMASK is the default umask value for pam_umask and is used by
-# useradd and newusers to set the mode of the new home directories.
-# 022 is the "historical" value in Debian for UMASK
-# 027, or even 077, could be considered better for privacy
-# There is no One True Answer here : each sysadmin must make up his/her
-# mind.
-#
-# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
-# for private user groups, i. e. the uid is the same as gid, and username is
-# the same as the primary group name: for these, the user permissions will be
-# used as group permissions, e. g. 022 will become 002.
-#
-# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
-#
-ERASECHAR 0177
-KILLCHAR 025
-UMASK {{ umask }}
-
-# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
-# If set to yes, userdel will remove the user´s group if it contains no more members, and useradd will create by default a group with the name of the user.
-USERGROUPS_ENAB yes
-
-#
-# Password aging controls:
-#
-# PASS_MAX_DAYS Maximum number of days a password may be used.
-# PASS_MIN_DAYS Minimum number of days allowed between password changes.
-# PASS_WARN_AGE Number of days warning given before a password expires.
-#
-PASS_MAX_DAYS {{ pwd_max_age }}
-PASS_MIN_DAYS {{ pwd_min_age }}
-PASS_WARN_AGE 7
-
-#
-# Min/max values for automatic uid selection in useradd
-#
-UID_MIN {{ uid_min }}
-UID_MAX 60000
-# System accounts
-SYS_UID_MIN {{ sys_uid_min }}
-SYS_UID_MAX {{ sys_uid_max }}
-
-# Min/max values for automatic gid selection in groupadd
-GID_MIN {{ gid_min }}
-GID_MAX 60000
-# System accounts
-SYS_GID_MIN {{ sys_gid_min }}
-SYS_GID_MAX {{ sys_gid_max }}
-
-#
-# Max number of login retries if password is bad. This will most likely be
-# overriden by PAM, since the default pam_unix module has it's own built
-# in of 3 retries. However, this is a safe fallback in case you are using
-# an authentication module that does not enforce PAM_MAXTRIES.
-#
-LOGIN_RETRIES {{ login_retries }}
-
-#
-# Max time in seconds for login
-#
-LOGIN_TIMEOUT {{ login_timeout }}
-
-#
-# Which fields may be changed by regular users using chfn - use
-# any combination of letters "frwh" (full name, room number, work
-# phone, home phone). If not defined, no changes are allowed.
-# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
-#
-{% if chfn_restrict %}
-CHFN_RESTRICT {{ chfn_restrict }}
-{% endif %}
-
-#
-# Should login be allowed if we can't cd to the home directory?
-# Default in no.
-#
-DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
-
-#
-# If defined, this command is run when removing a user.
-# It should remove any at/cron/print jobs etc. owned by
-# the user to be removed (passed as the first argument).
-#
-#USERDEL_CMD /usr/sbin/userdel_local
-
-#
-# Enable setting of the umask group bits to be the same as owner bits
-# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
-# the same as gid, and username is the same as the primary group name.
-#
-# If set to yes, userdel will remove the user´s group if it contains no
-# more members, and useradd will create by default a group with the name
-# of the user.
-#
-USERGROUPS_ENAB yes
-
-#
-# Instead of the real user shell, the program specified by this parameter
-# will be launched, although its visible name (argv[0]) will be the shell's.
-# The program may do whatever it wants (logging, additional authentification,
-# banner, ...) before running the actual shell.
-#
-# FAKE_SHELL /bin/fakeshell
-
-#
-# If defined, either full pathname of a file containing device names or
-# a ":" delimited list of device names. Root logins will be allowed only
-# upon these devices.
-#
-# This variable is used by login and su.
-#
-#CONSOLE /etc/consoles
-#CONSOLE console:tty01:tty02:tty03:tty04
-
-#
-# List of groups to add to the user's supplementary group set
-# when logging in on the console (as determined by the CONSOLE
-# setting). Default is none.
-#
-# Use with caution - it is possible for users to gain permanent
-# access to these groups, even when not logged in on the console.
-# How to do it is left as an exercise for the reader...
-#
-# This variable is used by login and su.
-#
-#CONSOLE_GROUPS floppy:audio:cdrom
-
-#
-# If set to "yes", new passwords will be encrypted using the MD5-based
-# algorithm compatible with the one used by recent releases of FreeBSD.
-# It supports passwords of unlimited length and longer salt strings.
-# Set to "no" if you need to copy encrypted passwords to other systems
-# which don't understand the new algorithm. Default is "no".
-#
-# This variable is deprecated. You should use ENCRYPT_METHOD.
-#
-MD5_CRYPT_ENAB no
-
-#
-# If set to MD5 , MD5-based algorithm will be used for encrypting password
-# If set to SHA256, SHA256-based algorithm will be used for encrypting password
-# If set to SHA512, SHA512-based algorithm will be used for encrypting password
-# If set to DES, DES-based algorithm will be used for encrypting password (default)
-# Overrides the MD5_CRYPT_ENAB option
-#
-# Note: It is recommended to use a value consistent with
-# the PAM modules configuration.
-#
-ENCRYPT_METHOD SHA512
-
-#
-# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
-#
-# Define the number of SHA rounds.
-# With a lot of rounds, it is more difficult to brute forcing the password.
-# But note also that it more CPU resources will be needed to authenticate
-# users.
-#
-# If not specified, the libc will choose the default number of rounds (5000).
-# The values must be inside the 1000-999999999 range.
-# If only one of the MIN or MAX values is set, then this value will be used.
-# If MIN > MAX, the highest value will be used.
-#
-# SHA_CRYPT_MIN_ROUNDS 5000
-# SHA_CRYPT_MAX_ROUNDS 5000
-
-################# OBSOLETED BY PAM ##############
-# #
-# These options are now handled by PAM. Please #
-# edit the appropriate file in /etc/pam.d/ to #
-# enable the equivelants of them.
-#
-###############
-
-#MOTD_FILE
-#DIALUPS_CHECK_ENAB
-#LASTLOG_ENAB
-#MAIL_CHECK_ENAB
-#OBSCURE_CHECKS_ENAB
-#PORTTIME_CHECKS_ENAB
-#SU_WHEEL_ONLY
-#CRACKLIB_DICTPATH
-#PASS_CHANGE_TRIES
-#PASS_ALWAYS_WARN
-#ENVIRON_FILE
-#NOLOGINS_FILE
-#ISSUE_FILE
-#PASS_MIN_LEN
-#PASS_MAX_LEN
-#ULIMIT
-#ENV_HZ
-#CHFN_AUTH
-#CHSH_AUTH
-#FAIL_DELAY
-
-################# OBSOLETED #######################
-# #
-# These options are no more handled by shadow. #
-# #
-# Shadow utilities will display a warning if they #
-# still appear. #
-# #
-###################################################
-
-# CLOSE_SESSIONS
-# LOGIN_STRING
-# NO_PASSWORD_CONSOLE
-# QMAIL_DIR
-
-
-
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules
deleted file mode 100644
index ef0354e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/modules
+++ /dev/null
@@ -1,117 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# /etc/modules: kernel modules to load at boot time.
-#
-# This file contains the names of kernel modules that should be loaded
-# at boot time, one per line. Lines beginning with "#" are ignored.
-# Parameters can be specified after the module name.
-
-# Arch
-# ----
-#
-# Modules for certains builds, contains support modules and some CPU-specific optimizations.
-
-{% if arch == "x86_64" -%}
-# Optimize for x86_64 cryptographic features
-twofish-x86_64-3way
-twofish-x86_64
-aes-x86_64
-salsa20-x86_64
-blowfish-x86_64
-{% endif -%}
-
-{% if cpuVendor == "intel" -%}
-# Intel-specific optimizations
-ghash-clmulni-intel
-aesni-intel
-kvm-intel
-{% endif -%}
-
-{% if cpuVendor == "amd" -%}
-# AMD-specific optimizations
-kvm-amd
-{% endif -%}
-
-kvm
-
-
-# Crypto
-# ------
-
-# Some core modules which comprise strong cryptography.
-blowfish_common
-blowfish_generic
-ctr
-cts
-lrw
-lzo
-rmd160
-rmd256
-rmd320
-serpent
-sha512_generic
-twofish_common
-twofish_generic
-xts
-zlib
-
-
-# Drivers
-# -------
-
-# Basics
-lp
-rtc
-loop
-
-# Filesystems
-ext2
-btrfs
-
-{% if desktop_enable -%}
-# Desktop
-psmouse
-snd
-snd_ac97_codec
-snd_intel8x0
-snd_page_alloc
-snd_pcm
-snd_timer
-soundcore
-usbhid
-{% endif -%}
-
-# Lib
-# ---
-xz
-
-
-# Net
-# ---
-
-# All packets needed for netfilter rules (ie iptables, ebtables).
-ip_tables
-x_tables
-iptable_filter
-iptable_nat
-
-# Targets
-ipt_LOG
-ipt_REJECT
-
-# Modules
-xt_connlimit
-xt_tcpudp
-xt_recent
-xt_limit
-xt_conntrack
-nf_conntrack
-nf_conntrack_ipv4
-nf_defrag_ipv4
-xt_state
-nf_nat
-
-# Addons
-xt_pknock \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
deleted file mode 100644
index f98d14e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-Name: passwdqc password strength enforcement
-Default: yes
-Priority: 1024
-Conflicts: cracklib
-Password-Type: Primary
-Password:
- requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
deleted file mode 100644
index fd2de79..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# Disable core dumps via soft limits for all users. Compliance to this setting
-# is voluntary and can be modified by users up to a hard limit. This setting is
-# a sane default.
-ulimit -S -c 0 > /dev/null 2>&1
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty
deleted file mode 100644
index 15b18d4..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/securetty
+++ /dev/null
@@ -1,11 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# A list of TTYs, from which root can log in
-# see `man securetty` for reference
-{% if ttys -%}
-{% for tty in ttys -%}
-{{ tty }}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2 b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2
deleted file mode 100644
index d962029..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/host/templates/tally2
+++ /dev/null
@@ -1,14 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-Name: tally2 lockout after failed attempts enforcement
-Default: yes
-Priority: 1024
-Conflicts: cracklib
-Auth-Type: Primary
-Auth-Initial:
- required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }}
-Account-Type: Primary
-Account-Initial:
- required pam_tally2.so
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py
deleted file mode 100644
index d4f0ec1..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.mysql.checks import config
-
-
-def run_mysql_checks():
- log("Starting MySQL hardening checks.", level=DEBUG)
- checks = config.get_audits()
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("MySQL hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py
deleted file mode 100644
index 3af8b89..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/checks/config.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import subprocess
-
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-from charmhelpers.contrib.hardening.audits.file import (
- FilePermissionAudit,
- DirectoryPermissionAudit,
- TemplatedFile,
-)
-from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get MySQL hardening config audits.
-
- :returns: dictionary of audits
- """
- if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0:
- log("MySQL does not appear to be installed on this node - "
- "skipping mysql hardening", level=WARNING)
- return []
-
- settings = utils.get_settings('mysql')
- hardening_settings = settings['hardening']
- my_cnf = hardening_settings['mysql-conf']
-
- audits = [
- FilePermissionAudit(paths=[my_cnf], user='root',
- group='root', mode=0o0600),
-
- TemplatedFile(hardening_settings['hardening-conf'],
- MySQLConfContext(),
- TEMPLATES_DIR,
- mode=0o0750,
- user='mysql',
- group='root',
- service_actions=[{'service': 'mysql',
- 'actions': ['restart']}]),
-
- # MySQL and Percona charms do not allow configuration of the
- # data directory, so use the default.
- DirectoryPermissionAudit('/var/lib/mysql',
- user='mysql',
- group='mysql',
- recursive=False,
- mode=0o755),
-
- DirectoryPermissionAudit('/etc/mysql',
- user='root',
- group='root',
- recursive=False,
- mode=0o700),
- ]
-
- return audits
-
-
-class MySQLConfContext(object):
- """Defines the set of key/value pairs to set in a mysql config file.
-
- This context, when called, will return a dictionary containing the
- key/value pairs of setting to specify in the
- /etc/mysql/conf.d/hardening.cnf file.
- """
- def __call__(self):
- settings = utils.get_settings('mysql')
- # Translate for python3
- return {'mysql_settings':
- [(k, v) for k, v in six.iteritems(settings['security'])]}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
deleted file mode 100644
index 8242586..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
+++ /dev/null
@@ -1,12 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-[mysqld]
-{% for setting, value in mysql_settings -%}
-{% if value == 'True' -%}
-{{ setting }}
-{% elif value != 'None' and value != None -%}
-{{ setting }} = {{ value }}
-{% endif -%}
-{% endfor -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py
deleted file mode 100644
index 277b8c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from os import path
-
-TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py
deleted file mode 100644
index b85150d..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.contrib.hardening.ssh.checks import config
-
-
-def run_ssh_checks():
- log("Starting SSH hardening checks.", level=DEBUG)
- checks = config.get_audits()
- for check in checks:
- log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
- check.ensure_compliance()
-
- log("SSH hardening checks complete.", level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py
deleted file mode 100644
index 3fb6ae8..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/checks/config.py
+++ /dev/null
@@ -1,394 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
-)
-from charmhelpers.fetch import (
- apt_install,
- apt_update,
-)
-from charmhelpers.core.host import lsb_release
-from charmhelpers.contrib.hardening.audits.file import (
- TemplatedFile,
- FileContentAudit,
-)
-from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR
-from charmhelpers.contrib.hardening import utils
-
-
-def get_audits():
- """Get SSH hardening config audits.
-
- :returns: dictionary of audits
- """
- audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
- SSHDConfigFileContentAudit()]
- return audits
-
-
-class SSHConfigContext(object):
-
- type = 'client'
-
- def get_macs(self, allow_weak_mac):
- if allow_weak_mac:
- weak_macs = 'weak'
- else:
- weak_macs = 'default'
-
- default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
- macs = {'default': default,
- 'weak': default + ',hmac-sha1'}
-
- default = ('hmac-sha2-512-etm@openssh.com,'
- 'hmac-sha2-256-etm@openssh.com,'
- 'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
- 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
- macs_66 = {'default': default,
- 'weak': default + ',hmac-sha1'}
-
- # Use newer ciphers on Ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
- macs = macs_66
-
- return macs[weak_macs]
-
- def get_kexs(self, allow_weak_kex):
- if allow_weak_kex:
- weak_kex = 'weak'
- else:
- weak_kex = 'default'
-
- default = 'diffie-hellman-group-exchange-sha256'
- weak = (default + ',diffie-hellman-group14-sha1,'
- 'diffie-hellman-group-exchange-sha1,'
- 'diffie-hellman-group1-sha1')
- kex = {'default': default,
- 'weak': weak}
-
- default = ('curve25519-sha256@libssh.org,'
- 'diffie-hellman-group-exchange-sha256')
- weak = (default + ',diffie-hellman-group14-sha1,'
- 'diffie-hellman-group-exchange-sha1,'
- 'diffie-hellman-group1-sha1')
- kex_66 = {'default': default,
- 'weak': weak}
-
- # Use newer kex on Ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- log('Detected Ubuntu 14.04 or newer, using new key exchange '
- 'algorithms', level=DEBUG)
- kex = kex_66
-
- return kex[weak_kex]
-
- def get_ciphers(self, cbc_required):
- if cbc_required:
- weak_ciphers = 'weak'
- else:
- weak_ciphers = 'default'
-
- default = 'aes256-ctr,aes192-ctr,aes128-ctr'
- cipher = {'default': default,
- 'weak': default + 'aes256-cbc,aes192-cbc,aes128-cbc'}
-
- default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
- 'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
- ciphers_66 = {'default': default,
- 'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
-
- # Use newer ciphers on ubuntu Trusty and above
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- log('Detected Ubuntu 14.04 or newer, using new ciphers',
- level=DEBUG)
- cipher = ciphers_66
-
- return cipher[weak_ciphers]
-
- def __call__(self):
- settings = utils.get_settings('ssh')
- if settings['common']['network_ipv6_enable']:
- addr_family = 'any'
- else:
- addr_family = 'inet'
-
- ctxt = {
- 'addr_family': addr_family,
- 'remote_hosts': settings['common']['remote_hosts'],
- 'password_auth_allowed':
- settings['client']['password_authentication'],
- 'ports': settings['common']['ports'],
- 'ciphers': self.get_ciphers(settings['client']['cbc_required']),
- 'macs': self.get_macs(settings['client']['weak_hmac']),
- 'kexs': self.get_kexs(settings['client']['weak_kex']),
- 'roaming': settings['client']['roaming'],
- }
- return ctxt
-
-
-class SSHConfig(TemplatedFile):
- def __init__(self):
- path = '/etc/ssh/ssh_config'
- super(SSHConfig, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=SSHConfigContext(),
- user='root',
- group='root',
- mode=0o0644)
-
- def pre_write(self):
- settings = utils.get_settings('ssh')
- apt_update(fatal=True)
- apt_install(settings['client']['package'])
- if not os.path.exists('/etc/ssh'):
- os.makedir('/etc/ssh')
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
- def post_write(self):
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
-
-class SSHDConfigContext(SSHConfigContext):
-
- type = 'server'
-
- def __call__(self):
- settings = utils.get_settings('ssh')
- if settings['common']['network_ipv6_enable']:
- addr_family = 'any'
- else:
- addr_family = 'inet'
-
- ctxt = {
- 'ssh_ip': settings['server']['listen_to'],
- 'password_auth_allowed':
- settings['server']['password_authentication'],
- 'ports': settings['common']['ports'],
- 'addr_family': addr_family,
- 'ciphers': self.get_ciphers(settings['server']['cbc_required']),
- 'macs': self.get_macs(settings['server']['weak_hmac']),
- 'kexs': self.get_kexs(settings['server']['weak_kex']),
- 'host_key_files': settings['server']['host_key_files'],
- 'allow_root_with_key': settings['server']['allow_root_with_key'],
- 'password_authentication':
- settings['server']['password_authentication'],
- 'use_priv_sep': settings['server']['use_privilege_separation'],
- 'use_pam': settings['server']['use_pam'],
- 'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
- 'print_motd': settings['server']['print_motd'],
- 'print_last_log': settings['server']['print_last_log'],
- 'client_alive_interval':
- settings['server']['alive_interval'],
- 'client_alive_count': settings['server']['alive_count'],
- 'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
- 'allow_agent_forwarding':
- settings['server']['allow_agent_forwarding'],
- 'deny_users': settings['server']['deny_users'],
- 'allow_users': settings['server']['allow_users'],
- 'deny_groups': settings['server']['deny_groups'],
- 'allow_groups': settings['server']['allow_groups'],
- 'use_dns': settings['server']['use_dns'],
- 'sftp_enable': settings['server']['sftp_enable'],
- 'sftp_group': settings['server']['sftp_group'],
- 'sftp_chroot': settings['server']['sftp_chroot'],
- 'max_auth_tries': settings['server']['max_auth_tries'],
- 'max_sessions': settings['server']['max_sessions'],
- }
- return ctxt
-
-
-class SSHDConfig(TemplatedFile):
- def __init__(self):
- path = '/etc/ssh/sshd_config'
- super(SSHDConfig, self).__init__(path=path,
- template_dir=TEMPLATES_DIR,
- context=SSHDConfigContext(),
- user='root',
- group='root',
- mode=0o0600,
- service_actions=[{'service': 'ssh',
- 'actions':
- ['restart']}])
-
- def pre_write(self):
- settings = utils.get_settings('ssh')
- apt_update(fatal=True)
- apt_install(settings['server']['package'])
- if not os.path.exists('/etc/ssh'):
- os.makedir('/etc/ssh')
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
- def post_write(self):
- # NOTE: don't recurse
- utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
- maxdepth=0)
-
-
-class SSHConfigFileContentAudit(FileContentAudit):
- def __init__(self):
- self.path = '/etc/ssh/ssh_config'
- super(SSHConfigFileContentAudit, self).__init__(self.path, {})
-
- def is_compliant(self, *args, **kwargs):
- self.pass_cases = []
- self.fail_cases = []
- settings = utils.get_settings('ssh')
-
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- if not settings['server']['weak_hmac']:
- self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['server']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
-
- if settings['server']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- if not settings['client']['weak_hmac']:
- self.fail_cases.append(r'^MACs.+,hmac-sha1$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['client']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
-
- if settings['client']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
-
- if settings['client']['roaming']:
- self.pass_cases.append(r'^UseRoaming yes$')
- else:
- self.fail_cases.append(r'^UseRoaming yes$')
-
- return super(SSHConfigFileContentAudit, self).is_compliant(*args,
- **kwargs)
-
-
-class SSHDConfigFileContentAudit(FileContentAudit):
- def __init__(self):
- self.path = '/etc/ssh/sshd_config'
- super(SSHDConfigFileContentAudit, self).__init__(self.path, {})
-
- def is_compliant(self, *args, **kwargs):
- self.pass_cases = []
- self.fail_cases = []
- settings = utils.get_settings('ssh')
-
- if lsb_release()['DISTRIB_CODENAME'].lower() >= 'trusty':
- if not settings['server']['weak_hmac']:
- self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['server']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa
-
- if settings['server']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- if not settings['server']['weak_hmac']:
- self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
- else:
- self.pass_cases.append(r'^MACs.+,hmac-sha1$')
-
- if settings['server']['weak_kex']:
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
- else:
- self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa
- self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa
-
- if settings['server']['cbc_required']:
- self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
- else:
- self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
- self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
-
- if settings['server']['sftp_enable']:
- self.pass_cases.append(r'^Subsystem\ssftp')
- else:
- self.fail_cases.append(r'^Subsystem\ssftp')
-
- return super(SSHDConfigFileContentAudit, self).is_compliant(*args,
- **kwargs)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/__init__.py
+++ /dev/null
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config
deleted file mode 100644
index 9742d8e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/ssh_config
+++ /dev/null
@@ -1,70 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# This is the ssh client system-wide configuration file. See
-# ssh_config(5) for more information. This file provides defaults for
-# users, and the values can be changed in per-user configuration files
-# or on the command line.
-
-# Configuration data is parsed as follows:
-# 1. command line options
-# 2. user-specific file
-# 3. system-wide file
-# Any configuration value is only changed the first time it is set.
-# Thus, host-specific definitions should be at the beginning of the
-# configuration file, and defaults at the end.
-
-# Site-wide defaults for some commonly used options. For a comprehensive
-# list of available options, their meanings and defaults, please see the
-# ssh_config(5) man page.
-
-# Restrict the following configuration to be limited to this Host.
-{% if remote_hosts -%}
-Host {{ ' '.join(remote_hosts) }}
-{% endif %}
-ForwardAgent no
-ForwardX11 no
-ForwardX11Trusted yes
-RhostsRSAAuthentication no
-RSAAuthentication yes
-PasswordAuthentication {{ password_auth_allowed }}
-HostbasedAuthentication no
-GSSAPIAuthentication no
-GSSAPIDelegateCredentials no
-GSSAPIKeyExchange no
-GSSAPITrustDNS no
-BatchMode no
-CheckHostIP yes
-AddressFamily {{ addr_family }}
-ConnectTimeout 0
-StrictHostKeyChecking ask
-IdentityFile ~/.ssh/identity
-IdentityFile ~/.ssh/id_rsa
-IdentityFile ~/.ssh/id_dsa
-# The port at the destination should be defined
-{% for port in ports -%}
-Port {{ port }}
-{% endfor %}
-Protocol 2
-Cipher 3des
-{% if ciphers -%}
-Ciphers {{ ciphers }}
-{%- endif %}
-{% if macs -%}
-MACs {{ macs }}
-{%- endif %}
-{% if kexs -%}
-KexAlgorithms {{ kexs }}
-{%- endif %}
-EscapeChar ~
-Tunnel no
-TunnelDevice any:any
-PermitLocalCommand no
-VisualHostKey no
-RekeyLimit 1G 1h
-SendEnv LANG LC_*
-HashKnownHosts yes
-{% if roaming -%}
-UseRoaming {{ roaming }}
-{% endif %}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config
deleted file mode 100644
index 5f87298..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/ssh/templates/sshd_config
+++ /dev/null
@@ -1,159 +0,0 @@
-###############################################################################
-# WARNING: This configuration file is maintained by Juju. Local changes may
-# be overwritten.
-###############################################################################
-# Package generated configuration file
-# See the sshd_config(5) manpage for details
-
-# What ports, IPs and protocols we listen for
-{% for port in ports -%}
-Port {{ port }}
-{% endfor -%}
-AddressFamily {{ addr_family }}
-# Use these options to restrict which interfaces/protocols sshd will bind to
-{% if ssh_ip -%}
-{% for ip in ssh_ip -%}
-ListenAddress {{ ip }}
-{% endfor %}
-{%- else -%}
-ListenAddress ::
-ListenAddress 0.0.0.0
-{% endif -%}
-Protocol 2
-{% if ciphers -%}
-Ciphers {{ ciphers }}
-{% endif -%}
-{% if macs -%}
-MACs {{ macs }}
-{% endif -%}
-{% if kexs -%}
-KexAlgorithms {{ kexs }}
-{% endif -%}
-# HostKeys for protocol version 2
-{% for keyfile in host_key_files -%}
-HostKey {{ keyfile }}
-{% endfor -%}
-
-# Privilege Separation is turned on for security
-{% if use_priv_sep -%}
-UsePrivilegeSeparation {{ use_priv_sep }}
-{% endif -%}
-
-# Lifetime and size of ephemeral version 1 server key
-KeyRegenerationInterval 3600
-ServerKeyBits 1024
-
-# Logging
-SyslogFacility AUTH
-LogLevel VERBOSE
-
-# Authentication:
-LoginGraceTime 30s
-{% if allow_root_with_key -%}
-PermitRootLogin without-password
-{% else -%}
-PermitRootLogin no
-{% endif %}
-PermitTunnel no
-PermitUserEnvironment no
-StrictModes yes
-
-RSAAuthentication yes
-PubkeyAuthentication yes
-AuthorizedKeysFile %h/.ssh/authorized_keys
-
-# Don't read the user's ~/.rhosts and ~/.shosts files
-IgnoreRhosts yes
-# For this to work you will also need host keys in /etc/ssh_known_hosts
-RhostsRSAAuthentication no
-# similar for protocol version 2
-HostbasedAuthentication no
-# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication
-IgnoreUserKnownHosts yes
-
-# To enable empty passwords, change to yes (NOT RECOMMENDED)
-PermitEmptyPasswords no
-
-# Change to yes to enable challenge-response passwords (beware issues with
-# some PAM modules and threads)
-ChallengeResponseAuthentication no
-
-# Change to no to disable tunnelled clear text passwords
-PasswordAuthentication {{ password_authentication }}
-
-# Kerberos options
-KerberosAuthentication no
-KerberosGetAFSToken no
-KerberosOrLocalPasswd no
-KerberosTicketCleanup yes
-
-# GSSAPI options
-GSSAPIAuthentication no
-GSSAPICleanupCredentials yes
-
-X11Forwarding {{ allow_x11_forwarding }}
-X11DisplayOffset 10
-X11UseLocalhost yes
-GatewayPorts no
-PrintMotd {{ print_motd }}
-PrintLastLog {{ print_last_log }}
-TCPKeepAlive no
-UseLogin no
-
-ClientAliveInterval {{ client_alive_interval }}
-ClientAliveCountMax {{ client_alive_count }}
-AllowTcpForwarding {{ allow_tcp_forwarding }}
-AllowAgentForwarding {{ allow_agent_forwarding }}
-
-MaxStartups 10:30:100
-#Banner /etc/issue.net
-
-# Allow client to pass locale environment variables
-AcceptEnv LANG LC_*
-
-# Set this to 'yes' to enable PAM authentication, account processing,
-# and session processing. If this is enabled, PAM authentication will
-# be allowed through the ChallengeResponseAuthentication and
-# PasswordAuthentication. Depending on your PAM configuration,
-# PAM authentication via ChallengeResponseAuthentication may bypass
-# the setting of "PermitRootLogin without-password".
-# If you just want the PAM account and session checks to run without
-# PAM authentication, then enable this but set PasswordAuthentication
-# and ChallengeResponseAuthentication to 'no'.
-UsePAM {{ use_pam }}
-
-{% if deny_users -%}
-DenyUsers {{ deny_users }}
-{% endif -%}
-{% if allow_users -%}
-AllowUsers {{ allow_users }}
-{% endif -%}
-{% if deny_groups -%}
-DenyGroups {{ deny_groups }}
-{% endif -%}
-{% if allow_groups -%}
-AllowGroups allow_groups
-{% endif -%}
-UseDNS {{ use_dns }}
-MaxAuthTries {{ max_auth_tries }}
-MaxSessions {{ max_sessions }}
-
-{% if sftp_enable -%}
-# Configuration, in case SFTP is used
-## override default of no subsystems
-## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
-Subsystem sftp internal-sftp -l VERBOSE
-
-## These lines must appear at the *end* of sshd_config
-Match Group {{ sftp_group }}
-ForceCommand internal-sftp -l VERBOSE
-ChrootDirectory {{ sftp_chroot }}
-{% else -%}
-# Configuration, in case SFTP is used
-## override default of no subsystems
-## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
-## These lines must appear at the *end* of sshd_config
-Match Group sftponly
-ForceCommand internal-sftp -l VERBOSE
-ChrootDirectory /sftpchroot/home/%u
-{% endif %}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py
deleted file mode 100644
index d2ab7dc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/templating.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- WARNING,
-)
-
-try:
- from jinja2 import FileSystemLoader, Environment
-except ImportError:
- from charmhelpers.fetch import apt_install
- from charmhelpers.fetch import apt_update
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment
-
-
-# NOTE: function separated from main rendering code to facilitate easier
-# mocking in unit tests.
-def write(path, data):
- with open(path, 'wb') as out:
- out.write(data)
-
-
-def get_template_path(template_dir, path):
- """Returns the template file which would be used to render the path.
-
- The path to the template file is returned.
- :param template_dir: the directory the templates are located in
- :param path: the file path to be written to.
- :returns: path to the template file
- """
- return os.path.join(template_dir, os.path.basename(path))
-
-
-def render_and_write(template_dir, path, context):
- """Renders the specified template into the file.
-
- :param template_dir: the directory to load the template from
- :param path: the path to write the templated contents to
- :param context: the parameters to pass to the rendering engine
- """
- env = Environment(loader=FileSystemLoader(template_dir))
- template_file = os.path.basename(path)
- template = env.get_template(template_file)
- log('Rendering from template: %s' % template.name, level=DEBUG)
- rendered_content = template.render(context)
- if not rendered_content:
- log("Render returned None - skipping '%s'" % path,
- level=WARNING)
- return
-
- write(path, rendered_content.encode('utf-8').strip())
- log('Wrote template %s' % path, level=DEBUG)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py
deleted file mode 100644
index a6743a4..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/hardening/utils.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2016 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import grp
-import os
-import pwd
-import six
-import yaml
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-
-
-# Global settings cache. Since each hook fire entails a fresh module import it
-# is safe to hold this in memory and not risk missing config changes (since
-# they will result in a new hook fire and thus re-import).
-__SETTINGS__ = {}
-
-
-def _get_defaults(modules):
- """Load the default config for the provided modules.
-
- :param modules: stack modules config defaults to lookup.
- :returns: modules default config dictionary.
- """
- default = os.path.join(os.path.dirname(__file__),
- 'defaults/%s.yaml' % (modules))
- return yaml.safe_load(open(default))
-
-
-def _get_schema(modules):
- """Load the config schema for the provided modules.
-
- NOTE: this schema is intended to have 1-1 relationship with they keys in
- the default config and is used a means to verify valid overrides provided
- by the user.
-
- :param modules: stack modules config schema to lookup.
- :returns: modules default schema dictionary.
- """
- schema = os.path.join(os.path.dirname(__file__),
- 'defaults/%s.yaml.schema' % (modules))
- return yaml.safe_load(open(schema))
-
-
-def _get_user_provided_overrides(modules):
- """Load user-provided config overrides.
-
- :param modules: stack modules to lookup in user overrides yaml file.
- :returns: overrides dictionary.
- """
- overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
- 'hardening.yaml')
- if os.path.exists(overrides):
- log("Found user-provided config overrides file '%s'" %
- (overrides), level=DEBUG)
- settings = yaml.safe_load(open(overrides))
- if settings and settings.get(modules):
- log("Applying '%s' overrides" % (modules), level=DEBUG)
- return settings.get(modules)
-
- log("No overrides found for '%s'" % (modules), level=DEBUG)
- else:
- log("No hardening config overrides file '%s' found in charm "
- "root dir" % (overrides), level=DEBUG)
-
- return {}
-
-
-def _apply_overrides(settings, overrides, schema):
- """Get overrides config overlayed onto modules defaults.
-
- :param modules: require stack modules config.
- :returns: dictionary of modules config with user overrides applied.
- """
- if overrides:
- for k, v in six.iteritems(overrides):
- if k in schema:
- if schema[k] is None:
- settings[k] = v
- elif type(schema[k]) is dict:
- settings[k] = _apply_overrides(settings[k], overrides[k],
- schema[k])
- else:
- raise Exception("Unexpected type found in schema '%s'" %
- type(schema[k]), level=ERROR)
- else:
- log("Unknown override key '%s' - ignoring" % (k), level=INFO)
-
- return settings
-
-
-def get_settings(modules):
- global __SETTINGS__
- if modules in __SETTINGS__:
- return __SETTINGS__[modules]
-
- schema = _get_schema(modules)
- settings = _get_defaults(modules)
- overrides = _get_user_provided_overrides(modules)
- __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
- return __SETTINGS__[modules]
-
-
-def ensure_permissions(path, user, group, permissions, maxdepth=-1):
- """Ensure permissions for path.
-
- If path is a file, apply to file and return. If path is a directory,
- apply recursively (if required) to directory contents and return.
-
- :param user: user name
- :param group: group name
- :param permissions: octal permissions
- :param maxdepth: maximum recursion depth. A negative maxdepth allows
- infinite recursion and maxdepth=0 means no recursion.
- :returns: None
- """
- if not os.path.exists(path):
- log("File '%s' does not exist - cannot set permissions" % (path),
- level=WARNING)
- return
-
- _user = pwd.getpwnam(user)
- os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
- os.chmod(path, permissions)
-
- if maxdepth == 0:
- log("Max recursion depth reached - skipping further recursion",
- level=DEBUG)
- return
- elif maxdepth > 0:
- maxdepth -= 1
-
- if os.path.isdir(path):
- contents = glob.glob("%s/*" % (path))
- for c in contents:
- ensure_permissions(c, user=user, group=group,
- permissions=permissions, maxdepth=maxdepth)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py b/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index 6bba07b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,499 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network(s): %s" % network)
- raise ValueError(errmsg)
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- networks = network.split() or [network]
- for network in networks:
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
-
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
-
-
-def resolve_network_cidr(ip_address):
- '''
- Resolves the full address cidr of an ip_address based on
- configured network interfaces
- '''
- netmask = get_netmask_for_address(ip_address)
- return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr)
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any.
-
- :param iface: network interface on which address(es) are expected to
- be found.
- :param inet_type: inet address family
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :return: list of ip addresses
- """
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
-
- :param iface: network interface on which ipv6 address(es) are expected to
- be found.
- :param inc_aliases: include alias interfaces in search
- :param fatal: if True, raise exception if address not found
- :param exc_list: list of addresses to ignore
- :param dynamic_only: only recognise dynamic addresses
- :return: list of ipv6 addresses
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4 address
- socket.inet_aton(address)
- return True
- except socket.error:
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- apt_install('python-dnspython')
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- answers = dns.resolver.query(address, rtype)
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- apt_install("python-dnspython")
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
-
-
-def port_has_listener(address, port):
- """
- Returns True if the address:port is open and being listened to,
- else False.
-
- @param address: an IP address or hostname
- @param port: integer port
-
- Note calls 'zc' via a subprocess shell
- """
- cmd = ['nc', '-z', address, str(port)]
- result = subprocess.call(cmd)
- return not(bool(result))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index ef77caf..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index d21c9c7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index ef3bdcc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1012 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index c07b33d..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1583 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call, CalledProcessError
-
-import six
-import yaml
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
- status_set,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
- pwgen,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import get_host_ip
-from charmhelpers.core.unitdata import kv
-
-try:
- import psutil
-except ImportError:
- apt_install('python-psutil', fatal=True)
- import psutil
-
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-class OSContextError(Exception):
- pass
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1',
- 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1', 'value1',
- 'key2', 'value2,value3,value4'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1', 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a list of comma-separated values
- which themselves may be comma-separated list of values.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return yaml.safe_load(config_flags)
-
- if config_flags.find('==') >= 0:
- log("config_flags is not in expected format (key=value)", level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = {}
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- log("Invalid config value(s) at index %s" % (i), level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- api_version = rdata.get('api_version') or '2.0'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol,
- 'api_version': api_version})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- unit_priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- ceph_addr = ceph_pub_addr or unit_priv_addr
- ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
- mon_hosts.append(ceph_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False):
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = {}
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in ADDRESS_TYPES:
- cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
- # match in the frontend
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('haproxy-queue-timeout'):
- ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
-
- if config('haproxy-connect-timeout'):
- ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
-
- if config('prefer-ipv6'):
- ctxt['ipv6'] = True
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
-
- ctxt['stat_port'] = '8888'
-
- db = kv()
- ctxt['stat_password'] = db.get('stat-password')
- if not ctxt['stat_password']:
- ctxt['stat_password'] = db.set('stat-password',
- pwgen(32))
- db.flush()
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of a tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert))
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key))
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- cns.append(k.lstrip('ssl_key_'))
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and vip
- (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- if config('vip'):
- vips = config('vip').split()
- else:
- vips = []
-
- for net_type in ['os-internal-network', 'os-admin-network',
- 'os-public-network']:
- addr = get_address_in_network(config(net_type),
- unit_get('private-address'))
- if len(vips) > 1 and is_clustered():
- if not config(net_type):
- log("Multiple networks configured but net_type "
- "is None (%s)." % net_type, level=WARNING)
- continue
-
- for vip in vips:
- if is_address_in_network(config(net_type), vip):
- addresses.append((addr, vip))
- break
-
- elif is_clustered() and config('vip'):
- addresses.append((addr, config('vip')))
- else:
- addresses.append((addr, addr))
-
- return sorted(addresses)
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- self.configure_ca()
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in sorted(set(addresses)):
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def midonet_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- midonet_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- mido_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'midonet',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': midonet_config}
-
- return mido_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
- elif self.plugin == 'midonet':
- ctxt.update(self.midonet_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- self._save_flag_file()
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT hace an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-seperated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some Openstack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
-
-
-class LibvirtConfigFlagsContext(OSContextGenerator):
- """
- This context provides support for extending
- the libvirt section through user-defined flags.
- """
- def __call__(self):
- ctxt = {}
- libvirt_flags = config('libvirt-flags')
- if libvirt_flags:
- ctxt['libvirt_flags'] = config_flags_parser(
- libvirt_flags)
- return ctxt
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principle for multiple config
- files and multiple serivces. Ie, a subordinate that has interfaces
- to both glance and nova may export to following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principle charms to subscribe this context to
- the service+config file it is interestd in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_configuration': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except:
- log('Could not parse JSON from '
- 'subordinate_configuration setting from %s'
- % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s service'
- % (rid, service), level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_configuration on %s but it '
- 'contained nothing for %s'
- % (rid, self.config_file), level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- @property
- def num_cpus(self):
- # NOTE: use cpu_count if present (16.04 support)
- if hasattr(psutil, 'cpu_count'):
- return psutil.cpu_count()
- else:
- return psutil.NUM_CPUS
-
- def __call__(self):
- multiplier = config('worker-multiplier') or 0
- ctxt = {"workers": self.num_cpus * multiplier}
- return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects current neutron-plugin-api relation for neutron settings. Return
- defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {normalized[port]: bridge for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.keys():
- ports = sorted(mappings.keys())
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- all_ports = set()
- # If any of ports is a vlan device, its underlying device must have
- # mtu applied first.
- for port in ports:
- for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
- lport = os.path.basename(lport)
- all_ports.add(lport.split('_')[1])
-
- all_ports = list(all_ports)
- all_ports.extend(ports)
- if mtu:
- ctxt["devs"] = '\\n'.join(all_ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- 'api_version':
- rdata.get('api_version') or '2.0',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
-
-
-class InternalEndpointContext(OSContextGenerator):
- """Internal endpoint context.
-
- This context provides the endpoint type used for communication between
- services e.g. between Nova and Cinder internally. Openstack uses Public
- endpoints by default so this allows admins to optionally use internal
- endpoints.
- """
- def __call__(self):
- return {'use_internal_endpoints': config('use-internal-endpoints')}
-
-
-class AppArmorContext(OSContextGenerator):
- """Base class for apparmor contexts."""
-
- def __init__(self):
- self._ctxt = None
- self.aa_profile = None
- self.aa_utils_packages = ['apparmor-utils']
-
- @property
- def ctxt(self):
- if self._ctxt is not None:
- return self._ctxt
- self._ctxt = self._determine_ctxt()
- return self._ctxt
-
- def _determine_ctxt(self):
- """
- Validate aa-profile-mode settings is disable, enforce, or complain.
-
- :return ctxt: Dictionary of the apparmor profile or None
- """
- if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
- ctxt = {'aa-profile-mode': config('aa-profile-mode')}
- else:
- ctxt = None
- return ctxt
-
- def __call__(self):
- return self.ctxt
-
- def install_aa_utils(self):
- """
- Install packages required for apparmor configuration.
- """
- log("Installing apparmor utils.")
- ensure_packages(self.aa_utils_packages)
-
- def manually_disable_aa_profile(self):
- """
- Manually disable an apparmor profile.
-
- If aa-profile-mode is set to disabled (default) this is required as the
- template has been written but apparmor is yet unaware of the profile
- and aa-disable aa-profile fails. Without this the profile would kick
- into enforce mode on the next service restart.
-
- """
- profile_path = '/etc/apparmor.d'
- disable_path = '/etc/apparmor.d/disable'
- if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
- os.symlink(os.path.join(profile_path, self.aa_profile),
- os.path.join(disable_path, self.aa_profile))
-
- def setup_aa_profile(self):
- """
- Setup an apparmor profile.
- The ctxt dictionary will contain the apparmor profile mode and
- the apparmor profile name.
- Makes calls out to aa-disable, aa-complain, or aa-enforce to setup
- the apparmor profile.
- """
- self()
- if not self.ctxt:
- log("Not enabling apparmor Profile")
- return
- self.install_aa_utils()
- cmd = ['aa-{}'.format(self.ctxt['aa-profile-mode'])]
- cmd.append(self.ctxt['aa-profile'])
- log("Setting up the apparmor profile for {} in {} mode."
- "".format(self.ctxt['aa-profile'], self.ctxt['aa-profile-mode']))
- try:
- check_call(cmd)
- except CalledProcessError as e:
- # If aa-profile-mode is set to disabled (default) manual
- # disabling is required as the template has been written but
- # apparmor is yet unaware of the profile and aa-disable aa-profile
- # fails. If aa-disable learns to read profile files first this can
- # be removed.
- if self.ctxt['aa-profile-mode'] == 'disable':
- log("Manually disabling the apparmor profile for {}."
- "".format(self.ctxt['aa-profile']))
- self.manually_disable_aa_profile()
- return
- status_set('blocked', "Apparmor profile {} failed to be set to {}."
- "".format(self.ctxt['aa-profile'],
- self.ctxt['aa-profile-mode']))
- raise e
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh
deleted file mode 100755
index 0df0717..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-CRITICAL=0
-NOTACTIVE=''
-LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
-
-typeset -i N_INSTANCES=0
-for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
-do
- N_INSTANCES=N_INSTANCES+1
- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
- if [ $? != 0 ]; then
- date >> $LOGFILE
- echo $output >> $LOGFILE
- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
- CRITICAL=1
- NOTACTIVE="${NOTACTIVE} $appserver"
- fi
-done
-
-if [ $CRITICAL = 1 ]; then
- echo "CRITICAL:${NOTACTIVE}"
- exit 2
-fi
-
-echo "OK: All haproxy instances ($N_INSTANCES) looking good"
-exit 0
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
deleted file mode 100755
index 3ebb532..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-# These should be config options at some stage
-CURRQthrsh=0
-MAXQthrsh=100
-
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
-
-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
-do
- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
-
- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
- exit 2
- fi
-done
-
-echo "OK: All haproxy queue depths looking good"
-exit 0
-
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 532a1dc..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
- network_get_primary_address,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
- resolve_network_cidr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'binding': 'public',
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'binding': 'internal',
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'binding': 'admin',
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured, or a Juju 2.0 extra-binding has been used.
-
- :param endpoint_type: Network endpoing type
- """
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- binding = ADDRESS_MAP[endpoint_type]['binding']
- clustered = is_clustered()
-
- if clustered and vips:
- if net_addr:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- # NOTE: endeavour to check vips against network space
- # bindings
- try:
- bound_cidr = resolve_network_cidr(
- network_get_primary_address(binding)
- )
- for vip in vips:
- if is_address_in_network(bound_cidr, vip):
- resolved_address = vip
- break
- except NotImplementedError:
- # If no net-splits configured and no support for extra
- # bindings/network spaces so we expect a single vip
- resolved_address = vips[0]
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- if net_addr:
- resolved_address = get_address_in_network(net_addr, fallback_addr)
- else:
- # NOTE: only try to use extra bindings if legacy network
- # configuration is not in use
- try:
- resolved_address = network_get_primary_address(binding)
- except NotImplementedError:
- resolved_address = fallback_addr
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index d057ea6..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return [headers_package(), 'openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': ['plumgrid-lxc',
- 'iovisor-dkms'],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- },
- 'midonet': {
- 'config': '/etc/neutron/plugins/midonet/midonet.ini',
- 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [determine_dkms_package()],
- 'server_packages': ['neutron-server',
- 'python-neutron-plugin-midonet'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- if release >= 'kilo':
- plugins['midonet']['driver'] = (
- 'neutron.plugins.midonet.plugin.MidonetPluginV2')
- if release >= 'liberty':
- plugins['midonet']['driver'] = (
- 'midonet.neutron.plugin_v1.MidonetPluginV2')
- plugins['midonet']['server_packages'].remove(
- 'python-neutron-plugin-midonet')
- plugins['midonet']['server_packages'].append(
- 'python-networking-midonet')
- plugins['plumgrid']['driver'] = (
- 'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
- plugins['plumgrid']['server_packages'].remove(
- 'neutron-plugin-plumgrid')
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of bridge:port.
-
- Returns dict of the form {port:bridge} where ports may be mac addresses or
- interface names.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index 33ceee2..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[global]
-{% if auth -%}
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
-
-[client]
-{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.iteritems() -%}
-{{ key }} = {{ value }}
-{% endfor -%}
-{%- endif %} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart
deleted file mode 100644
index 4bed404..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/git.upstart
+++ /dev/null
@@ -1,17 +0,0 @@
-description "{{ service_description }}"
-author "Juju {{ service_name }} Charm <juju@localhost>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid {{ user_name }} \
- --chdir {{ start_dir }} --name {{ process_name }} \
- --exec {{ executable_name }} -- \
- {% for config_file in config_files -%}
- --config-file={{ config_file }} \
- {% endfor -%}
- {% if log_file -%}
- --log-file={{ log_file }}
- {% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg
deleted file mode 100644
index 32b6276..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ /dev/null
@@ -1,66 +0,0 @@
-global
- log {{ local_host }} local0
- log {{ local_host }} local1 notice
- maxconn 20000
- user haproxy
- group haproxy
- spread-checks 0
-
-defaults
- log global
- mode tcp
- option tcplog
- option dontlognull
- retries 3
-{%- if haproxy_queue_timeout %}
- timeout queue {{ haproxy_queue_timeout }}
-{%- else %}
- timeout queue 5000
-{%- endif %}
-{%- if haproxy_connect_timeout %}
- timeout connect {{ haproxy_connect_timeout }}
-{%- else %}
- timeout connect 5000
-{%- endif %}
-{%- if haproxy_client_timeout %}
- timeout client {{ haproxy_client_timeout }}
-{%- else %}
- timeout client 30000
-{%- endif %}
-{%- if haproxy_server_timeout %}
- timeout server {{ haproxy_server_timeout }}
-{%- else %}
- timeout server 30000
-{%- endif %}
-
-listen stats
- bind {{ local_host }}:{{ stat_port }}
- mode http
- stats enable
- stats hide-version
- stats realm Haproxy\ Statistics
- stats uri /
- stats auth admin:{{ stat_password }}
-
-{% if frontends -%}
-{% for service, ports in service_ports.items() -%}
-frontend tcp-in_{{ service }}
- bind *:{{ ports[0] }}
- {% if ipv6 -%}
- bind :::{{ ports[0] }}
- {% endif -%}
- {% for frontend in frontends -%}
- acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
- use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
- {% endfor -%}
- default_backend {{ service }}_{{ default_backend }}
-
-{% for frontend in frontends -%}
-backend {{ service }}_{{ frontend }}
- balance leastconn
- {% for unit, address in frontends[frontend]['backends'].items() -%}
- server {{ unit }} {{ address }}:{{ ports[1] }} check
- {% endfor %}
-{% endfor -%}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend
deleted file mode 100644
index 6a92380..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend
+++ /dev/null
@@ -1,26 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
- SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
deleted file mode 100644
index 6a92380..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
- SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
deleted file mode 100644
index 5dcebe7..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ /dev/null
@@ -1,12 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-auth_plugin = password
-project_domain_id = default
-user_domain_id = default
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
deleted file mode 100644
index 9356b2b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
+++ /dev/null
@@ -1,10 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-# Juno specific config (Bug #1557223)
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
deleted file mode 100644
index dd6f364..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
+++ /dev/null
@@ -1,12 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
-auth_type = password
-project_domain_name = default
-user_domain_name = default
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
deleted file mode 100644
index b444c9c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-[oslo_messaging_rabbit]
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq
deleted file mode 100644
index 95f1a76..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templates/section-zeromq
+++ /dev/null
@@ -1,14 +0,0 @@
-{% if zmq_host -%}
-# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
-rpc_backend = zmq
-rpc_zmq_host = {{ zmq_host }}
-{% if zmq_redis_address -%}
-rpc_zmq_matchmaker = redis
-matchmaker_heartbeat_freq = 15
-matchmaker_heartbeat_ttl = 30
-[matchmaker_redis]
-host = {{ zmq_redis_address }}
-{% else -%}
-rpc_zmq_matchmaker = ring
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index e5e3cb1..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 115cc4b..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,1576 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-import itertools
-import functools
-
-import six
-import tempfile
-import traceback
-import uuid
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- action_fail,
- action_set,
- config,
- log as juju_log,
- charm_dir,
- DEBUG,
- INFO,
- related_units,
- relation_ids,
- relation_set,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr,
- is_ipv6,
- port_has_listener,
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import (
- lsb_release,
- mounts,
- umount,
- service_running,
- service_pause,
- service_resume,
- restart_on_change_helper,
-)
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
- ('2016.1', 'mitaka'),
-])
-
-# The ugly duckling - must list releases oldest to newest
-SWIFT_CODENAMES = OrderedDict([
- ('diablo',
- ['1.4.3']),
- ('essex',
- ['1.4.8']),
- ('folsom',
- ['1.7.4']),
- ('grizzly',
- ['1.7.6', '1.7.7', '1.8.0']),
- ('havana',
- ['1.9.0', '1.9.1', '1.10.0']),
- ('icehouse',
- ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
- ('juno',
- ['2.0.0', '2.1.0', '2.2.0']),
- ('kilo',
- ['2.2.1', '2.2.2']),
- ('liberty',
- ['2.3.0', '2.4.0', '2.5.0']),
- ('mitaka',
- ['2.5.0', '2.6.0', '2.7.0']),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12.0', 'liberty'),
- ('13.0', 'mitaka'),
- ]),
- 'neutron-common': OrderedDict([
- ('7.0', 'liberty'),
- ('8.0', 'mitaka'),
- ('8.1', 'mitaka'),
- ]),
- 'cinder-common': OrderedDict([
- ('7.0', 'liberty'),
- ('8.0', 'mitaka'),
- ]),
- 'keystone': OrderedDict([
- ('8.0', 'liberty'),
- ('8.1', 'liberty'),
- ('9.0', 'mitaka'),
- ]),
- 'horizon-common': OrderedDict([
- ('8.0', 'liberty'),
- ('9.0', 'mitaka'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5.0', 'liberty'),
- ('6.1', 'mitaka'),
- ]),
- 'heat-common': OrderedDict([
- ('5.0', 'liberty'),
- ('6.0', 'mitaka'),
- ]),
- 'glance-common': OrderedDict([
- ('11.0', 'liberty'),
- ('12.0', 'mitaka'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8.0', 'liberty'),
- ('9.0', 'mitaka'),
- ]),
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_version_codename_swift(codename):
- '''Determine OpenStack version number of swift from codename.'''
- for k, v in six.iteritems(SWIFT_CODENAMES):
- if k == codename:
- return v[-1]
- e = 'Could not derive swift version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_swift_codename(version):
- '''Determine OpenStack codename that corresponds to swift version.'''
- codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
- if len(codenames) > 1:
- # If more than one release codename contains this version we determine
- # the actual codename based on the highest available install source.
- for codename in reversed(codenames):
- releases = UBUNTU_OPENSTACK_RELEASE
- release = [k for k, v in six.iteritems(releases) if codename in v]
- ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
- if codename in ret or release[0] in ret:
- return codename
- elif len(codenames) == 1:
- return codenames[0]
- return None
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- if 'swift' in pkg.name:
- # Fully x.y.z match for swift versions
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- else:
- # x.y match only for 20XX.X
- # and ignore patch level for other packages
- match = re.match('^(\d+)\.(\d+)', vers)
-
- if match:
- vers = match.group(0)
-
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- return get_swift_codename(vers)
- else:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- for cname, version in six.iteritems(vers_map):
- if cname == codename:
- return version[-1]
- else:
- vers_map = OPENSTACK_CODENAMES
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
def import_key(keyid):
    """Import a PGP key into apt's trusted keyring.

    Accepts either a full ASCII-armored public key block (written to a
    temporary file and fed to ``apt-key add``) or a plain key ID (fetched
    from the Ubuntu keyserver via ``apt-key adv --recv-keys``).

    Errors out (via error_out) if apt-key fails.
    """
    key = keyid.strip()
    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
        with tempfile.NamedTemporaryFile() as keyfile:
            # Re-open by name in text mode so the armored key is written as
            # text regardless of NamedTemporaryFile's default binary mode.
            with open(keyfile.name, 'w') as fd:
                fd.write(key)
                fd.write("\n")

            cmd = ['apt-key', 'add', keyfile.name]
            try:
                subprocess.check_call(cmd)
            except subprocess.CalledProcessError:
                error_out("Error importing PGP key '%s'" % key)
    else:
        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
        juju_log("Importing PGP key from keyserver", level=DEBUG)
        cmd = ['apt-key', 'adv', '--keyserver',
               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError:
            error_out("Error importing PGP key '%s'" % key)
-
-
def get_source_and_pgp_key(input):
    """Look for a pgp key ID or ascii-armor key in the given input.

    :param input: str: installation source, optionally suffixed with
        '|&lt;key-id-or-armored-key&gt;'.
    :returns: (source, key) tuple; key is None when no '|' is present.
    """
    # BUG FIX: previously the result of input.strip() was assigned to
    # 'index' and then immediately overwritten by input.rfind('|'), so the
    # stripped value was discarded and surrounding whitespace leaked into
    # the returned source. Strip once and parse the cleaned value.
    source_and_key = input.strip()
    index = source_and_key.rfind('|')
    if index < 0:
        return source_and_key, None

    key = source_and_key[index + 1:].strip('|')
    source = source_and_key[:index]
    return source, key
-
-
def configure_installation_source(rel):
    '''Configure apt installation source.

    :param rel: str: one of 'distro', 'distro-proposed', 'ppa:...',
        'deb ...', or 'cloud:<ubuntu-series>-<pocket>'; anything else
        errors out.
    '''
    if rel == 'distro':
        # Nothing to configure - install from the stock Ubuntu archive.
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        # The source may carry an optional '|<pgp-key>' suffix.
        src, key = get_source_and_pgp_key(rel)
        if key:
            import_key(key)

        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        src, key = get_source_and_pgp_key(rel)
        if key:
            import_key(key)

        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        # Cloud Archive spec: 'cloud:<ubuntu-series>-<openstack-pocket>'.
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            # NOTE(review): this local 'os_rel' shadows the module-level
            # cache used by os_release(); harmless here as a plain local.
            os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly',
            'havana': 'precise-updates/havana',
            'havana/updates': 'precise-updates/havana',
            'havana/proposed': 'precise-proposed/havana',
            'icehouse': 'precise-updates/icehouse',
            'icehouse/updates': 'precise-updates/icehouse',
            'icehouse/proposed': 'precise-proposed/icehouse',
            'juno': 'trusty-updates/juno',
            'juno/updates': 'trusty-updates/juno',
            'juno/proposed': 'trusty-proposed/juno',
            'kilo': 'trusty-updates/kilo',
            'kilo/updates': 'trusty-updates/kilo',
            'kilo/proposed': 'trusty-proposed/kilo',
            'liberty': 'trusty-updates/liberty',
            'liberty/updates': 'trusty-updates/liberty',
            'liberty/proposed': 'trusty-proposed/liberty',
            'mitaka': 'trusty-updates/mitaka',
            'mitaka/updates': 'trusty-updates/mitaka',
            'mitaka/proposed': 'trusty-proposed/mitaka',
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        # The keyring package carries the Cloud Archive signing key.
        apt_install('ubuntu-cloud-keyring', fatal=True)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
-
-
def config_value_changed(option):
    """
    Return True if *option*'s config value differs from the value seen on
    the previous call; the current value is persisted either way. The
    first observation of an option always reports False.
    """
    hook_data = unitdata.HookData()
    with hook_data():
        db = unitdata.kv()
        current = config(option)
        previous = db.get(option)
        db.set(option, current)
        return previous is not None and current != previous
-
-
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts run
    outside the juju hook environment can source this scriptrc to obtain
    updated config information necessary to perform health checks or
    service changes.

    :param script_path: str: path of the rc file, relative to charm_dir().
    :param env_vars: name=value pairs to export ('script_path' is skipped).
    """
    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
    if not os.path.exists(os.path.dirname(juju_rc_path)):
        os.mkdir(os.path.dirname(juju_rc_path))
    # BUG FIX: open in text mode - the content written is str, and writing
    # str to a binary-mode ('wb') handle raises TypeError on Python 3.
    with open(juju_rc_path, 'wt') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # Plain loop instead of a side-effect list comprehension.
        for name, value in six.iteritems(env_vars):
            if name != "script_path":
                rc_script.write('export %s=%s\n' % (name, value))
-
-
def openstack_upgrade_available(package):
    """
    Determines if an OpenStack upgrade is available from installation
    source, based on version of installed package.

    :param package: str: Name of installed package.

    :returns: bool: : Returns True if configured installation source offers
                      a newer version of package.

    """

    import apt_pkg as apt
    src = config('openstack-origin')
    cur_vers = get_os_version_package(package)
    if "swift" in package:
        # swift versions independently of the coordinated OpenStack
        # numbers, so resolve the available version via the codename.
        codename = get_os_codename_install_source(src)
        avail_vers = get_os_version_codename_swift(codename)
    else:
        avail_vers = get_os_version_install_source(src)
    # apt.init() must be called before apt.version_compare() is usable.
    apt.init()
    if "swift" in package:
        # swift: only allow an upgrade within the same major version or to
        # the next one, to avoid skipping a major release in one step.
        major_cur_vers = cur_vers.split('.', 1)[0]
        major_avail_vers = avail_vers.split('.', 1)[0]
        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
    # version_compare() returns 1 when avail_vers sorts after cur_vers.
    return apt.version_compare(avail_vers, cur_vers) == 1
-
-
def ensure_block_device(block_device):
    '''
    Confirm block_device exists, creating it as a loopback device if
    necessary.

    :param block_device: str: Full path of block device to ensure. A
        '/path|size' value requests a loopback device of the given size;
        a bare name is resolved under /dev/.

    :returns: str: Full path of ensured block device.
    '''
    if block_device in ['None', 'none', None]:
        error_out('prepare_storage(): Missing required input: block_device=%s.'
                  % block_device)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        parts = block_device.split('|')
        if len(parts) == 2:
            bdev, size = parts
        else:
            bdev, size = block_device, DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev)

    return bdev
-
-
def clean_storage(block_device):
    '''
    Ensures a block device is clean. That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount any mount points backed by this device first.
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (d, mp), level=INFO)
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        # Tear down LVM state before the PV signature is removed.
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        # Non-LVM device: just wipe the partition table.
        zap_disk(block_device)
-
# Convenience aliases re-exported from the charmhelpers ip module so that
# existing callers of this module keep working.
is_ip = ip.is_ip
ns_query = ip.ns_query
get_host_ip = ip.get_host_ip
get_hostname = ip.get_hostname
-
-
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
    """Return the oslo matchmaker ring as a dict ({} if the file is
    absent)."""
    if not os.path.isfile(mm_file):
        return {}
    with open(mm_file, 'r') as f:
        return json.load(f)
-
-
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Publish this unit's IPv6 addresses (plus any IPv6 VIPs) on every
    shared-db relation so the database can grant access to all of them.

    :param database: str: database name to request.
    :param database_user: str: database username to request.
    :param relation_prefix: optional str prefixed to every relation key.
    """
    hosts = get_ipv6_addr(dynamic_only=False)

    if config('vip'):
        vips = config('vip').split()
        for vip in vips:
            if vip and is_ipv6(vip):
                hosts.append(vip)

    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        # Re-key everything as '<prefix>_<key>'; iterate over a snapshot of
        # the keys since the dict is mutated inside the loop.
        for key in list(kwargs.keys()):
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)
-
-
def os_requires_version(ostack_release, pkg):
    """
    Decorator enforcing a minimum OpenStack release for a hook.

    At call time, raises an Exception when the release derived from *pkg*
    sorts before *ostack_release* (codenames compare alphabetically).
    """
    def wrap(f):
        @wraps(f)
        def wrapped_f(*args):
            release = os_release(pkg)
            if release < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            f(*args)
        return wrapped_f
    return wrap
-
-
def git_install_requested():
    """
    Return True when the openstack-origin-git config option is set.
    """
    origin_git = config('openstack-origin-git')
    return origin_git is not None
-
-
# Path of the cloned global 'requirements' repo; set by
# git_clone_and_install() and consumed by _git_clone_and_install_single().
requirements_dir = None
-
-
def _git_yaml_load(projects_yaml):
    """
    Load the specified yaml into a dictionary (None for empty input).

    NOTE: yaml.load() can construct arbitrary Python objects; the projects
    yaml here comes from charm config, which is operator-controlled.
    """
    if projects_yaml:
        return yaml.load(projects_yaml)
    return None
-
-
def git_clone_and_install(projects_yaml, core_project):
    """
    Clone/install all specified OpenStack repositories.

    The expected format of projects_yaml is:

        repositories:
          - {name: keystone,
             repository: 'git://git.openstack.org/openstack/keystone.git',
             branch: 'stable/icehouse'}
          - {name: requirements,
             repository: 'git://git.openstack.org/openstack/requirements.git',
             branch: 'stable/icehouse'}

        directory: /mnt/openstack-git
        http_proxy: squid-proxy-url
        https_proxy: squid-proxy-url

    The directory, http_proxy, and https_proxy keys are optional.

    :param projects_yaml: str: yaml document in the format above.
    :param core_project: str: name of the repo that must be listed last.
    """
    global requirements_dir
    parent_dir = '/mnt/openstack-git'
    http_proxy = None

    projects = _git_yaml_load(projects_yaml)
    _git_validate_projects_yaml(projects, core_project)

    # Snapshot the environment; proxy settings are injected below and the
    # whole environment is restored at the end.
    old_environ = dict(os.environ)

    if 'http_proxy' in projects.keys():
        http_proxy = projects['http_proxy']
        os.environ['http_proxy'] = projects['http_proxy']
    if 'https_proxy' in projects.keys():
        os.environ['https_proxy'] = projects['https_proxy']

    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    pip_create_virtualenv(os.path.join(parent_dir, 'venv'))

    # Upgrade setuptools and pip from default virtualenv versions. The default
    # versions in trusty break master OpenStack branch deployments.
    for p in ['pip', 'setuptools']:
        pip_install(p, upgrade=True, proxy=http_proxy,
                    venv=os.path.join(parent_dir, 'venv'))

    for p in projects['repositories']:
        repo = p['repository']
        branch = p['branch']
        depth = '1'
        if 'depth' in p.keys():
            depth = p['depth']
        if p['name'] == 'requirements':
            # The requirements repo is cloned without syncing and its path
            # remembered so subsequent repos can sync from it.
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=False)
            requirements_dir = repo_dir
        else:
            repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                     parent_dir, http_proxy,
                                                     update_requirements=True)

    # Restore the pre-call environment (drops the injected proxy vars).
    os.environ = old_environ
-
-
def _git_validate_projects_yaml(projects, core_project):
    """
    Validate the projects yaml: required keys present, 'requirements'
    repo listed first, and the core project's repo listed last.
    """
    _git_ensure_key_exists('repositories', projects)

    for project in projects['repositories']:
        for required_key in ('name', 'repository', 'branch'):
            _git_ensure_key_exists(required_key, project.keys())

    repos = projects['repositories']
    if repos[0]['name'] != 'requirements':
        error_out('{} git repo must be specified first'.format('requirements'))
    if repos[-1]['name'] != core_project:
        error_out('{} git repo must be specified last'.format(core_project))
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
                                  update_requirements):
    """
    Clone and install a single git repository.

    :param repo: str: git URL of the repository to clone.
    :param branch: str: branch to check out.
    :param depth: shallow-clone depth passed to install_remote().
    :param parent_dir: str: directory the repo is cloned beneath.
    :param http_proxy: optional proxy URL handed to pip.
    :param update_requirements: bool: when True, sync this repo's
        requirements from the previously-cloned global requirements repo.
    :returns: str: path of the cloned repository.
    """
    if not os.path.exists(parent_dir):
        # BUG FIX: the old log message claimed the directory already
        # existed, but this branch runs when it is missing and creates it.
        juju_log('Host directory not found at {}. '
                 'Creating directory.'.format(parent_dir))
        os.mkdir(parent_dir)

    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
    repo_dir = install_remote(
        repo, dest=parent_dir, branch=branch, depth=depth)

    venv = os.path.join(parent_dir, 'venv')

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(venv, repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    if http_proxy:
        pip_install(repo_dir, proxy=http_proxy, venv=venv)
    else:
        pip_install(repo_dir, venv=venv)

    return repo_dir
-
-
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt.

    :param venv: str: path of the virtualenv whose python runs update.py.
    :param package_dir: str: path of the package to update.
    :param reqs_dir: str: path of the cloned global requirements repo.
    """
    orig_dir = os.getcwd()
    # update.py must be executed from inside the requirements repo.
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    # NOTE(review): cwd is not restored when error_out() terminates the
    # hook above - presumably acceptable since the process exits; confirm.
    os.chdir(orig_dir)
-
-
def git_pip_venv_dir(projects_yaml):
    """
    Return the pip virtualenv path (defaults under /mnt/openstack-git
    unless the yaml provides a 'directory' key).
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')
    return os.path.join(parent_dir, 'venv')
-
-
def git_src_dir(projects_yaml, project):
    """
    Return the directory where the specified project's source is located,
    or None when the project is not in the repositories list.
    """
    projects = _git_yaml_load(projects_yaml)
    parent_dir = projects.get('directory', '/mnt/openstack-git')

    for entry in projects['repositories']:
        if entry['name'] == project:
            return os.path.join(parent_dir,
                                os.path.basename(entry['repository']))

    return None
-
-
def git_yaml_value(projects_yaml, key):
    """
    Return the value in projects_yaml for the specified key (None when
    the key is absent).
    """
    projects = _git_yaml_load(projects_yaml)
    return projects.get(key)
-
-
def os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Decorator that sets the workload status after the wrapped hook has run
    and the contexts have been acted on.
    """
    def decorator(f):
        @wraps(f)
        def wrapped_f(*args, **kwargs):
            # Run the decorated hook first, then report status.
            f(*args, **kwargs)
            set_os_workload_status(configs, required_interfaces, charm_func)
        return wrapped_f
    return decorator
-
-
def set_os_workload_status(configs, required_interfaces, charm_func=None,
                           services=None, ports=None):
    """Set the state of the workload status for the charm.

    Delegates to _determine_os_workload_status() for the new state and
    message, then reports them via status_set().

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    """
    new_state, new_message = _determine_os_workload_status(
        configs, required_interfaces, charm_func, services, ports)
    status_set(new_state, new_message)
-
-
def _determine_os_workload_status(
        configs, required_interfaces, charm_func=None,
        services=None, ports=None):
    """Determine the state of the workload status for the charm.

    This function returns the new workload status for the charm based
    on the state of the interfaces, the paused state and whether the
    services are actually running and any specified ports are open.

    This checks:

     1. if the unit should be paused, that it is actually paused.  If so the
        state is 'maintenance' + message, else 'broken'.
     2. that the interfaces/relations are complete.  If they are not then
        it sets the state to either 'broken' or 'waiting' and an appropriate
        message.
     3. If all the relation data is set, then it checks that the actual
        services really are running.  If not it sets the state to 'broken'.

    If everything is okay then the state returns 'active'.

    @param configs: a templating.OSConfigRenderer() object
    @param required_interfaces: {generic: [specific, specific2, ...]}
    @param charm_func: a callable function that returns state, message. The
                       signature is charm_func(configs) -> (state, message)
    @param services: list of strings OR dictionary specifying services/ports
    @param ports: OPTIONAL list of port numbers.
    @returns state, message: the new workload status, user message
    """
    # Checks run in strict priority order; each later check only runs if
    # no earlier one has already decided the state (state is still None).
    state, message = _ows_check_if_paused(services, ports)

    if state is None:
        state, message = _ows_check_generic_interfaces(
            configs, required_interfaces)

    # The charm's own check may refine any non-'maintenance' result.
    if state != 'maintenance' and charm_func:
        # _ows_check_charm_func() may modify the state, message
        state, message = _ows_check_charm_func(
            state, message, lambda: charm_func(configs))

    if state is None:
        state, message = _ows_check_services_running(services, ports)

    if state is None:
        state = 'active'
        message = "Unit is ready"
        juju_log(message, 'INFO')

    return state, message
-
-
def _ows_check_if_paused(services=None, ports=None):
    """Check if the unit is supposed to be paused, and if so check that the
    services/ports (if passed) are actually stopped/not being listened to.

    If the unit isn't supposed to be paused, just return None, None.

    @param services: OPTIONAL services spec or list of service names.
    @param ports: OPTIONAL list of port numbers.
    @returns state, message or None, None
    """
    if not is_unit_paused_set():
        return None, None
    state, message = check_actually_paused(services=services, ports=ports)
    if state is None:
        # we're paused okay, so report maintenance
        state = "maintenance"
        message = "Paused. Use 'resume' action to resume normal service."
    return state, message
-
-
def _ows_check_generic_interfaces(configs, required_interfaces):
    """Check the complete contexts to determine the workload status.

     - Checks for missing or incomplete contexts
     - juju log details of missing required data.
     - determines the correct workload status
     - creates an appropriate message for status_set(...)

    if there are no problems then the function returns None, None

    @param configs: a templating.OSConfigRenderer() object
    @params required_interfaces: {generic_interface: [specific_interface], }
    @returns state, message or None, None
    """
    incomplete_rel_data = incomplete_relation_data(configs,
                                                   required_interfaces)
    state = None
    message = None
    missing_relations = set()
    incomplete_relations = set()

    for generic_interface, relations_states in incomplete_rel_data.items():
        related_interface = None
        missing_data = {}
        # Related or not? First interface with a relation ID wins.
        for interface, relation_state in relations_states.items():
            if relation_state.get('related'):
                related_interface = interface
                missing_data = relation_state.get('missing_data')
                break
        # No relation ID for the generic_interface?
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            missing_relations.add(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case - relation ID exists but departing
                _hook_name = hook_name()
                if (('departed' in _hook_name or 'broken' in _hook_name) and
                        related_interface in _hook_name):
                    state = 'blocked'
                    missing_relations.add(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface),
                             "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    juju_log("{} relations's interface, {}, is related but has"
                             " no units in the relation."
                             "".format(generic_interface, related_interface),
                             "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            # 'waiting' never downgrades an earlier 'blocked' verdict.
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in missing_relations:
                incomplete_relations.add(generic_interface)

    # Missing relations dominate: any missing relation forces 'blocked'.
    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    return state, message
-
-
-def _ows_check_charm_func(state, message, charm_func_with_configs):
- """Run a custom check function for the charm to see if it wants to
- change the state. This is only run if not in 'maintenance' and
- tests to see if the new state is more important that the previous
- one determined by the interfaces/relations check.
-
- @param state: the previously determined state so far.
- @param message: the user orientated message so far.
- @param charm_func: a callable function that returns state, message
- @returns state, message strings.
- """
- if charm_func_with_configs:
- charm_state, charm_message = charm_func_with_configs()
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- charm_message = charm_message.replace("Incomplete relations: ",
- "")
- message = "{}, {}".format(message, charm_message)
- else:
- message = charm_message
- return state, message
-
-
def _ows_check_services_running(services, ports):
    """Check that the services that should be running are actually running
    and that any ports specified are being listened to.

    @param services: list of strings OR dictionary specifying services/ports
    @param ports: list of ports
    @returns state, message: strings or None, None
    """
    messages = []
    state = None
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, running = _check_running_services(services)
        if not all(running):
            messages.append(
                "Services not running that should be: {}"
                .format(", ".join(_filter_tuples(services_running, False))))
            state = 'blocked'
        # also verify that the ports that should be open are open
        # NB, that ServiceManager objects only OPTIONALLY have ports
        map_not_open, ports_open = (
            _check_listening_on_services_ports(services))
        if not all(ports_open):
            # find which service has missing ports. They are in service
            # order which makes it a bit easier.
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in map_not_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "Services with ports not open that should be: {}"
                .format(message))
            state = 'blocked'

    if ports is not None:
        # and we can also check ports which we don't know the service for
        ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
        if not all(ports_open_bools):
            messages.append(
                "Ports which should be open, but are not: {}"
                .format(", ".join([str(p) for p, v in ports_open
                                   if not v])))
            state = 'blocked'

    # Only report when at least one check failed; otherwise fall through
    # to None, None so the caller can continue its checks.
    if state is not None:
        message = "; ".join(messages)
        return state, message

    return None, None
-
-
-def _extract_services_list_helper(services):
- """Extract a OrderedDict of {service: [ports]} of the supplied services
- for use by the other functions.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param services: see above
- @returns OrderedDict(service: [ports], ...)
- """
- if services is None:
- return {}
- if isinstance(services, dict):
- services = services.values()
- # either extract the list of services from the dictionary, or if
- # it is a simple string, use that. i.e. works with mixed lists.
- _s = OrderedDict()
- for s in services:
- if isinstance(s, dict) and 'service' in s:
- _s[s['service']] = s.get('ports', [])
- if isinstance(s, str):
- _s[s] = []
- return _s
-
-
def _check_running_services(services):
    """Check whether each service in *services* is running.

    @param services: OrderedDict of strings: [ports], one for each service
                     to check.
    @returns [(service, boolean), ...] : results for checks
             [boolean]                 : just the result of the service checks
    """
    states = [service_running(name) for name in services]
    return list(zip(services, states)), states
-
-
def _check_listening_on_services_ports(services, test=False):
    """Check whether the unit is listening on the ports each service
    declares. With test=False the returned map contains the ports that are
    NOT open; with test=True, the ports that ARE open.

    @param services: OrderedDict(service: [port, ...], ...)
    @param test: default=False, if False, test for closed, otherwise open.
    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
    """
    test = bool(test)
    all_ports = list(itertools.chain(*services.values()))
    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
    # ports whose observed state matches what we're looking for
    matched = {p for p, listening in zip(all_ports, ports_states)
               if listening == test}
    map_ports = OrderedDict()
    for service, ports in services.items():
        hits = set(ports).intersection(matched)
        if hits:
            map_ports[service] = hits
    return map_ports, ports_states
-
-
def _check_listening_on_ports_list(ports):
    """Check that the given ports are being listened to.

    @param ports: LIST of port numbers.
    @returns [(port_num, boolean), ...], [boolean]
    """
    listening = [port_has_listener('0.0.0.0', port) for port in ports]
    return zip(ports, listening), listening
-
-
-def _filter_tuples(services_states, state):
- """Return a simple list from a list of tuples according to the condition
-
- @param services_states: LIST of (string, boolean): service and running
- state.
- @param state: Boolean to match the tuple against.
- @returns [LIST of strings] that matched the tuple RHS.
- """
- return [s for s, b in services_states if b == state]
-
-
def workload_state_compare(current_workload_state, workload_state):
    """Return whichever of the two workload states has higher priority.

    Unrecognised states are treated as 'unknown' (lowest priority).
    """
    hierarchy = {'unknown': -1,
                 'active': 0,
                 'maintenance': 1,
                 'waiting': 2,
                 'blocked': 3,
                 }

    if workload_state not in hierarchy:
        workload_state = 'unknown'
    if current_workload_state not in hierarchy:
        current_workload_state = 'unknown'

    # Higher hierarchy value wins; ties keep the new workload_state.
    if hierarchy[current_workload_state] > hierarchy[workload_state]:
        return current_workload_state
    return workload_state
-
-
def incomplete_relation_data(configs, required_interfaces):
    """Check complete contexts against required_interfaces and return a
    dictionary of incomplete relation data.

    configs is an OSConfigRenderer object with configs registered

    required_interfaces is a dictionary of required general interfaces
    with dictionary values of possible specific interfaces.
    Example:
    required_interfaces = {'database': ['shared-db', 'pgsql-db']}

    The interface is said to be satisfied if anyone of the interfaces in the
    list has a complete context.

    Return dictionary of incomplete or missing required contexts with relation
    status of interfaces and any missing data points. Example:
        {'message':
             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
              'zeromq-configuration': {'related': False}},
         'identity':
             {'identity-service': {'related': False}},
         'database':
             {'pgsql-db': {'related': False},
              'shared-db': {'related': True}}}
    """
    complete = set(configs.complete_contexts())
    result = {}
    for svc_type, interfaces in required_interfaces.items():
        # satisfied when at least one specific interface has a complete
        # context; otherwise report its incomplete data.
        if not complete.intersection(interfaces):
            result[svc_type] = configs.get_incomplete_context_data(
                required_interfaces[svc_type])
    return result
-
-
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if git_install_requested():
        action_set({'outcome': 'installed from source, skipped upgrade.'})
    else:
        if openstack_upgrade_available(package):
            if config('action-managed-upgrade'):
                juju_log('Upgrading OpenStack release')

                try:
                    upgrade_callback(configs=configs)
                    action_set({'outcome': 'success, upgrade completed.'})
                    ret = True
                except Exception:
                    # BUG FIX: narrowed from a bare 'except:' so that
                    # SystemExit/KeyboardInterrupt still propagate; any
                    # other failure is reported via the action framework.
                    action_set({'outcome': 'upgrade failed, see traceback.'})
                    action_set({'traceback': traceback.format_exc()})
                    action_fail('do_openstack_upgrade resulted in an '
                                'unexpected error')
            else:
                action_set({'outcome': 'action-managed-upgrade config is '
                                       'False, skipped upgrade.'})
        else:
            action_set({'outcome': 'no upgrade available.'})

    return ret
-
-
def remote_restart(rel_name, remote_service=None):
    """Set a fresh restart trigger on every relation of *rel_name* so the
    remote principal restarts its services.

    :param rel_name: str: relation name to publish the trigger on.
    :param remote_service: optional str naming the remote service to
        restart.
    """
    trigger = {'restart-trigger': str(uuid.uuid4())}
    if remote_service:
        trigger['remote-service'] = remote_service
    for rid in relation_ids(rel_name):
        # This subordinate can be related to two separate services using
        # different subordinate relations, so only issue the restart if
        # the principal is connected down the relation we think it is.
        if related_units(relid=rid):
            relation_set(relation_id=rid,
                         relation_settings=trigger,
                         )
-
-
def check_actually_paused(services=None, ports=None):
    """Check that services listed in the services object and ports
    are actually closed (not listened to), to verify that the unit is
    properly paused.

    @param services: See _extract_services_list_helper
    @param ports: OPTIONAL list of port numbers that must be closed.
    @returns status, : string for status (None if okay)
             message : string for problem for status_set
    """
    state = None
    message = None
    messages = []
    if services is not None:
        services = _extract_services_list_helper(services)
        services_running, services_states = _check_running_services(services)
        if any(services_states):
            # there shouldn't be any running so this is a problem
            messages.append("these services running: {}"
                            .format(", ".join(
                                _filter_tuples(services_running, True))))
            state = "blocked"
        # test=True: report service ports that ARE open (should be closed).
        ports_open, ports_open_bools = (
            _check_listening_on_services_ports(services, True))
        if any(ports_open_bools):
            message_parts = {service: ", ".join([str(v) for v in open_ports])
                             for service, open_ports in ports_open.items()}
            message = ", ".join(
                ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
            messages.append(
                "these service:ports are open: {}".format(message))
            state = 'blocked'
    if ports is not None:
        ports_open, bools = _check_listening_on_ports_list(ports)
        if any(bools):
            messages.append(
                "these ports which should be closed, but are open: {}"
                .format(", ".join([str(p) for p, v in ports_open if v])))
            state = 'blocked'
    if messages:
        message = ("Services should be paused but {}"
                   .format(", ".join(messages)))
    return state, message
-
-
-def set_unit_paused():
- """Set the unit to a paused state in the local kv() store.
- This does NOT actually pause the unit
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', True)
-
-
-def clear_unit_paused():
- """Clear the unit from a paused state in the local kv() store
- This does NOT actually restart any services - it only clears the
- local state.
- """
- with unitdata.HookData()() as t:
- kv = t[0]
- kv.set('unit-paused', False)
-
-
-def is_unit_paused_set():
- """Return the state of the kv().get('unit-paused').
- This does NOT verify that the unit really is paused.
-
- To help with units that don't have HookData() (testing)
- if it excepts, return False
- """
- try:
- with unitdata.HookData()() as t:
- kv = t[0]
- # transform something truth-y into a Boolean.
- return not(not(kv.get('unit-paused')))
- except:
- return False
-
-
-def pause_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Pause a unit by stopping the services and setting 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have stopped and ports are no longer
- being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return non None, None to indicate that the unit
- didn't pause cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are stopped, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm pausing.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- services = _extract_services_list_helper(services)
- messages = []
- if services:
- for service in services.keys():
- stopped = service_pause(service)
- if not stopped:
- messages.append("{} didn't stop cleanly.".format(service))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- message.append(str(e))
- set_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't pause: {}".format("; ".join(messages)))
-
-
-def resume_unit(assess_status_func, services=None, ports=None,
- charm_func=None):
- """Resume a unit by starting the services and clearning 'unit-paused'
- in the local kv() store.
-
- Also checks that the services have started and ports are being listened to.
-
- An optional charm_func() can be called that can either raise an
- Exception or return non None to indicate that the unit
- didn't resume cleanly.
-
- The signature for charm_func is:
- charm_func() -> message: string
-
- charm_func() is executed after any services are started, if supplied.
-
- The services object can either be:
- - None : no services were passed (an empty dict is returned)
- - a list of strings
- - A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- - An array of [{'service': service_name, ...}, ...]
-
- @param assess_status_func: (f() -> message: string | None) or None
- @param services: OPTIONAL see above
- @param ports: OPTIONAL list of port
- @param charm_func: function to run for custom charm resuming.
- @returns None
- @raises Exception(message) on an error for action_fail().
- """
- services = _extract_services_list_helper(services)
- messages = []
- if services:
- for service in services.keys():
- started = service_resume(service)
- if not started:
- messages.append("{} didn't start cleanly.".format(service))
- if charm_func:
- try:
- message = charm_func()
- if message:
- messages.append(message)
- except Exception as e:
- message.append(str(e))
- clear_unit_paused()
- if assess_status_func:
- message = assess_status_func()
- if message:
- messages.append(message)
- if messages:
- raise Exception("Couldn't resume: {}".format("; ".join(messages)))
-
-
-def make_assess_status_func(*args, **kwargs):
- """Creates an assess_status_func() suitable for handing to pause_unit()
- and resume_unit().
-
- This uses the _determine_os_workload_status(...) function to determine
- what the workload_status should be for the unit. If the unit is
- not in maintenance or active states, then the message is returned to
- the caller. This is so an action that doesn't result in either a
- complete pause or complete resume can signal failure with an action_fail()
- """
- def _assess_status_func():
- state, message = _determine_os_workload_status(*args, **kwargs)
- status_set(state, message)
- if state not in ['maintenance', 'active']:
- return message
- return None
-
- return _assess_status_func
-
-
-def pausable_restart_on_change(restart_map, stopstart=False,
- restart_functions=None):
- """A restart_on_change decorator that checks to see if the unit is
- paused. If it is paused then the decorated function doesn't fire.
-
- This is provided as a helper, as the @restart_on_change(...) decorator
- is in core.host, yet the openstack specific helpers are in this file
- (contrib.openstack.utils). Thus, this needs to be an optional feature
- for openstack charms (or charms that wish to use the openstack
- pause/resume type features).
-
- It is used as follows:
-
- from contrib.openstack.utils import (
- pausable_restart_on_change as restart_on_change)
-
- @restart_on_change(restart_map, stopstart=<boolean>)
- def some_hook(...):
- pass
-
- see core.utils.restart_on_change() for more details.
-
- @param f: the function to decorate
- @param restart_map: the restart map {conf_file: [services]}
- @param stopstart: DEFAULT false; whether to stop, start or just restart
- @returns decorator to use a restart_on_change with pausability
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- if is_unit_paused_set():
- return f(*args, **kwargs)
- # otherwise, normal restart_on_change functionality
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py
deleted file mode 100644
index eafca44..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/peerstorage/__init__.py
+++ /dev/null
@@ -1,269 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import six
-
-from charmhelpers.core.hookenv import relation_id as current_relation_id
-from charmhelpers.core.hookenv import (
- is_relation_made,
- relation_ids,
- relation_get as _relation_get,
- local_unit,
- relation_set as _relation_set,
- leader_get as _leader_get,
- leader_set,
- is_leader,
-)
-
-
-"""
-This helper provides functions to support use of a peer relation
-for basic key/value storage, with the added benefit that all storage
-can be replicated across peer units.
-
-Requirement to use:
-
-To use this, the "peer_echo()" method has to be called form the peer
-relation's relation-changed hook:
-
-@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
-def cluster_relation_changed():
- peer_echo()
-
-Once this is done, you can use peer storage from anywhere:
-
-@hooks.hook("some-hook")
-def some_hook():
- # You can store and retrieve key/values this way:
- if is_relation_made("cluster"): # from charmhelpers.core.hookenv
- # There are peers available so we can work with peer storage
- peer_store("mykey", "myvalue")
- value = peer_retrieve("mykey")
- print value
- else:
- print "No peers joind the relation, cannot share key/values :("
-"""
-
-
-def leader_get(attribute=None, rid=None):
- """Wrapper to ensure that settings are migrated from the peer relation.
-
- This is to support upgrading an environment that does not support
- Juju leadership election to one that does.
-
- If a setting is not extant in the leader-get but is on the relation-get
- peer rel, it is migrated and marked as such so that it is not re-migrated.
- """
- migration_key = '__leader_get_migrated_settings__'
- if not is_leader():
- return _leader_get(attribute=attribute)
-
- settings_migrated = False
- leader_settings = _leader_get(attribute=attribute)
- previously_migrated = _leader_get(attribute=migration_key)
-
- if previously_migrated:
- migrated = set(json.loads(previously_migrated))
- else:
- migrated = set([])
-
- try:
- if migration_key in leader_settings:
- del leader_settings[migration_key]
- except TypeError:
- pass
-
- if attribute:
- if attribute in migrated:
- return leader_settings
-
- # If attribute not present in leader db, check if this unit has set
- # the attribute in the peer relation
- if not leader_settings:
- peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
- rid=rid)
- if peer_setting:
- leader_set(settings={attribute: peer_setting})
- leader_settings = peer_setting
-
- if leader_settings:
- settings_migrated = True
- migrated.add(attribute)
- else:
- r_settings = _relation_get(unit=local_unit(), rid=rid)
- if r_settings:
- for key in set(r_settings.keys()).difference(migrated):
- # Leader setting wins
- if not leader_settings.get(key):
- leader_settings[key] = r_settings[key]
-
- settings_migrated = True
- migrated.add(key)
-
- if settings_migrated:
- leader_set(**leader_settings)
-
- if migrated and settings_migrated:
- migrated = json.dumps(list(migrated))
- leader_set(settings={migration_key: migrated})
-
- return leader_settings
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Attempt to use leader-set if supported in the current version of Juju,
- otherwise falls back on relation-set.
-
- Note that we only attempt to use leader-set if the provided relation_id is
- a peer relation id or no relation id is provided (in which case we assume
- we are within the peer relation context).
- """
- try:
- if relation_id in relation_ids('cluster'):
- return leader_set(settings=relation_settings, **kwargs)
- else:
- raise NotImplementedError
- except NotImplementedError:
- return _relation_set(relation_id=relation_id,
- relation_settings=relation_settings, **kwargs)
-
-
-def relation_get(attribute=None, unit=None, rid=None):
- """Attempt to use leader-get if supported in the current version of Juju,
- otherwise falls back on relation-get.
-
- Note that we only attempt to use leader-get if the provided rid is a peer
- relation id or no relation id is provided (in which case we assume we are
- within the peer relation context).
- """
- try:
- if rid in relation_ids('cluster'):
- return leader_get(attribute, rid)
- else:
- raise NotImplementedError
- except NotImplementedError:
- return _relation_get(attribute=attribute, rid=rid, unit=unit)
-
-
-def peer_retrieve(key, relation_name='cluster'):
- """Retrieve a named key from peer relation `relation_name`."""
- cluster_rels = relation_ids(relation_name)
- if len(cluster_rels) > 0:
- cluster_rid = cluster_rels[0]
- return relation_get(attribute=key, rid=cluster_rid,
- unit=local_unit())
- else:
- raise ValueError('Unable to detect'
- 'peer relation {}'.format(relation_name))
-
-
-def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
- inc_list=None, exc_list=None):
- """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
- inc_list = inc_list if inc_list else []
- exc_list = exc_list if exc_list else []
- peerdb_settings = peer_retrieve('-', relation_name=relation_name)
- matched = {}
- if peerdb_settings is None:
- return matched
- for k, v in peerdb_settings.items():
- full_prefix = prefix + delimiter
- if k.startswith(full_prefix):
- new_key = k.replace(full_prefix, '')
- if new_key in exc_list:
- continue
- if new_key in inc_list or len(inc_list) == 0:
- matched[new_key] = v
- return matched
-
-
-def peer_store(key, value, relation_name='cluster'):
- """Store the key/value pair on the named peer relation `relation_name`."""
- cluster_rels = relation_ids(relation_name)
- if len(cluster_rels) > 0:
- cluster_rid = cluster_rels[0]
- relation_set(relation_id=cluster_rid,
- relation_settings={key: value})
- else:
- raise ValueError('Unable to detect '
- 'peer relation {}'.format(relation_name))
-
-
-def peer_echo(includes=None, force=False):
- """Echo filtered attributes back onto the same relation for storage.
-
- This is a requirement to use the peerstorage module - it needs to be called
- from the peer relation's changed hook.
-
- If Juju leader support exists this will be a noop unless force is True.
- """
- try:
- is_leader()
- except NotImplementedError:
- pass
- else:
- if not force:
- return # NOOP if leader-election is supported
-
- # Use original non-leader calls
- relation_get = _relation_get
- relation_set = _relation_set
-
- rdata = relation_get()
- echo_data = {}
- if includes is None:
- echo_data = rdata.copy()
- for ex in ['private-address', 'public-address']:
- if ex in echo_data:
- echo_data.pop(ex)
- else:
- for attribute, value in six.iteritems(rdata):
- for include in includes:
- if include in attribute:
- echo_data[attribute] = value
- if len(echo_data) > 0:
- relation_set(relation_settings=echo_data)
-
-
-def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
- peer_store_fatal=False, relation_settings=None,
- delimiter='_', **kwargs):
- """Store passed-in arguments both in argument relation and in peer storage.
-
- It functions like doing relation_set() and peer_store() at the same time,
- with the same data.
-
- @param relation_id: the id of the relation to store the data on. Defaults
- to the current relation.
- @param peer_store_fatal: Set to True, the function will raise an exception
- should the peer sotrage not be avialable."""
-
- relation_settings = relation_settings if relation_settings else {}
- relation_set(relation_id=relation_id,
- relation_settings=relation_settings,
- **kwargs)
- if is_relation_made(peer_relation_name):
- for key, value in six.iteritems(dict(list(kwargs.items()) +
- list(relation_settings.items()))):
- key_prefix = relation_id or current_relation_id()
- peer_store(key_prefix + delimiter + key,
- value,
- relation_name=peer_relation_name)
- else:
- if peer_store_fatal:
- raise ValueError('Unable to detect '
- 'peer relation {}'.format(peer_relation_name))
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py b/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index a2411c3..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-import sys
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def pip_execute(*args, **kwargs):
- """Overriden pip_execute() to stop sys.path being changed.
-
- The act of importing main from the pip module seems to cause add wheels
- from the /usr/share/python-wheels which are installed by various tools.
- This function ensures that sys.path remains the same after the call is
- executed.
- """
- try:
- _path = sys.path
- try:
- from pip import main as _pip_execute
- except ImportError:
- apt_update()
- apt_install('python-pip')
- from pip import main as _pip_execute
- _pip_execute(*args, **kwargs)
- finally:
- sys.path = _path
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, constraints=None, **options):
- """Install a requirements file.
-
- :param constraints: Path to pip constraints file.
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
- """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- if constraints:
- command.append("-c {0}".format(constraints))
- log("Installing from file: {} with constraints {} "
- "and options: {}".format(requirements, constraints, command))
- else:
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- apt_install('python-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index d008081..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,1206 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-import bisect
-import errno
-import hashlib
-import six
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-
-from charmhelpers.core.kernel import modprobe
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
-powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
-
-
-def validator(value, valid_type, valid_range=None):
- """
- Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
- Example input:
- validator(value=1,
- valid_type=int,
- valid_range=[0, 2])
- This says I'm testing value=1. It must be an int inclusive in [0,2]
-
- :param value: The value to validate
- :param valid_type: The type that value should be.
- :param valid_range: A range of values that value can assume.
- :return:
- """
- assert isinstance(value, valid_type), "{} is not a {}".format(
- value,
- valid_type)
- if valid_range is not None:
- assert isinstance(valid_range, list), \
- "valid_range must be a list, was given {}".format(valid_range)
- # If we're dealing with strings
- if valid_type is six.string_types:
- assert value in valid_range, \
- "{} is not in the list {}".format(value, valid_range)
- # Integer, float should have a min and max
- else:
- if len(valid_range) != 2:
- raise ValueError(
- "Invalid valid_range list of {} for {}. "
- "List must be [min,max]".format(valid_range, value))
- assert value >= valid_range[0], \
- "{} is less than minimum allowed value of {}".format(
- value, valid_range[0])
- assert value <= valid_range[1], \
- "{} is greater than maximum allowed value of {}".format(
- value, valid_range[1])
-
-
-class PoolCreationError(Exception):
- """
- A custom error to inform the caller that a pool creation failed. Provides an error message
- """
-
- def __init__(self, message):
- super(PoolCreationError, self).__init__(message)
-
-
-class Pool(object):
- """
- An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
- Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
- """
-
- def __init__(self, service, name):
- self.service = service
- self.name = name
-
- # Create the pool if it doesn't exist already
- # To be implemented by subclasses
- def create(self):
- pass
-
- def add_cache_tier(self, cache_pool, mode):
- """
- Adds a new cache tier to an existing pool.
- :param cache_pool: six.string_types. The cache tier pool name to add.
- :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
- :return: None
- """
- # Check the input types and values
- validator(value=cache_pool, valid_type=six.string_types)
- validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
-
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
- check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
-
- def remove_cache_tier(self, cache_pool):
- """
- Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
- :param cache_pool: six.string_types. The cache tier pool name to remove.
- :return: None
- """
- # read-only is easy, writeback is much harder
- mode = get_cache_mode(self.service, cache_pool)
- version = ceph_version()
- if mode == 'readonly':
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- elif mode == 'writeback':
- pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
- 'cache-mode', cache_pool, 'forward']
- if version >= '10.1':
- # Jewel added a mandatory flag
- pool_forward_cmd.append('--yes-i-really-mean-it')
-
- check_call(pool_forward_cmd)
- # Flush the cache and wait for it to return
- check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
- check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
-
- def get_pgs(self, pool_size):
- """
- :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
- erasure coded pools
- :return: int. The number of pgs to use.
- """
- validator(value=pool_size, valid_type=int)
- osd_list = get_osds(self.service)
- if not osd_list:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- return 200
-
- osd_list_length = len(osd_list)
- # Calculate based on Ceph best practices
- if osd_list_length < 5:
- return 128
- elif 5 < osd_list_length < 10:
- return 512
- elif 10 < osd_list_length < 50:
- return 4096
- else:
- estimate = (osd_list_length * 100) / pool_size
- # Return the next nearest power of 2
- index = bisect.bisect_right(powers_of_two, estimate)
- return powers_of_two[index]
-
-
-class ReplicatedPool(Pool):
- def __init__(self, service, name, pg_num=None, replicas=2):
- super(ReplicatedPool, self).__init__(service=service, name=name)
- self.replicas = replicas
- if pg_num is None:
- self.pg_num = self.get_pgs(self.replicas)
- else:
- self.pg_num = pg_num
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
- self.name, str(self.pg_num)]
- try:
- check_call(cmd)
- # Set the pool replica size
- update_pool(client=self.service,
- pool=self.name,
- settings={'size': str(self.replicas)})
- except CalledProcessError:
- raise
-
-
-# Default jerasure erasure coded pool
-class ErasurePool(Pool):
- def __init__(self, service, name, erasure_code_profile="default"):
- super(ErasurePool, self).__init__(service=service, name=name)
- self.erasure_code_profile = erasure_code_profile
-
- def create(self):
- if not pool_exists(self.service, self.name):
- # Try to find the erasure profile information so we can properly size the pgs
- erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
-
- # Check for errors
- if erasure_profile is None:
- log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
- level=ERROR)
- raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
- if 'k' not in erasure_profile or 'm' not in erasure_profile:
- # Error
- log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
- level=ERROR)
- raise PoolCreationError(
- message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
-
- pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
- # Create it
- cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
- 'erasure', self.erasure_code_profile]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
- """Get an existing erasure code profile if it already exists.
- Returns json formatted output"""
-
-
-def get_mon_map(service):
- """
- Returns the current monitor map.
- :param service: six.string_types. The Ceph user name to run the command under
- :return: json string. :raise: ValueError if the monmap fails to parse.
- Also raises CalledProcessError if our ceph command fails
- """
- try:
- mon_status = check_output(
- ['ceph', '--id', service,
- 'mon_status', '--format=json'])
- try:
- return json.loads(mon_status)
- except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}".format(
- mon_status, v.message))
- raise
- except CalledProcessError as e:
- log("mon_status command failed with message: {}".format(
- e.message))
- raise
-
-
-def hash_monitor_names(service):
- """
- Uses the get_mon_map() function to get information about the monitor
- cluster.
- Hash the name of each monitor. Return a sorted list of monitor hashes
- in an ascending order.
- :param service: six.string_types. The Ceph user name to run the command under
- :rtype : dict. json dict of monitor name, ip address and rank
- example: {
- 'name': 'ip-172-31-13-165',
- 'rank': 0,
- 'addr': '172.31.13.165:6789/0'}
- """
- try:
- hash_list = []
- monitor_list = get_mon_map(service=service)
- if monitor_list['monmap']['mons']:
- for mon in monitor_list['monmap']['mons']:
- hash_list.append(
- hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
- return sorted(hash_list)
- else:
- return None
- except (ValueError, CalledProcessError):
- raise
-
-
-def monitor_key_delete(service, key):
- """
- Delete a key and value pair from the monitor cluster
- :param service: six.string_types. The Ceph user name to run the command under
- Deletes a key value pair on the monitor cluster.
- :param key: six.string_types. The key to delete.
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'del', str(key)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_set(service, key, value):
- """
- Sets a key value pair on the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to set.
- :param value: The value to set. This will be converted to a string
- before setting
- """
- try:
- check_output(
- ['ceph', '--id', service,
- 'config-key', 'put', str(key), str(value)])
- except CalledProcessError as e:
- log("Monitor config-key put failed with message: {}".format(
- e.output))
- raise
-
-
-def monitor_key_get(service, key):
- """
- Gets the value of an existing key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for.
- :return: Returns the value of that key or None if not found.
- """
- try:
- output = check_output(
- ['ceph', '--id', service,
- 'config-key', 'get', str(key)])
- return output
- except CalledProcessError as e:
- log("Monitor config-key get failed with message: {}".format(
- e.output))
- return None
-
-
-def monitor_key_exists(service, key):
- """
- Searches for the existence of a key in the monitor cluster.
- :param service: six.string_types. The Ceph user name to run the command under
- :param key: six.string_types. The key to search for
- :return: Returns True if the key exists, False if not and raises an
- exception if an unknown error occurs. :raise: CalledProcessError if
- an unknown error occurs
- """
- try:
- check_call(
- ['ceph', '--id', service,
- 'config-key', 'exists', str(key)])
- # I can return true here regardless because Ceph returns
- # ENOENT if the key wasn't found
- return True
- except CalledProcessError as e:
- if e.returncode == errno.ENOENT:
- return False
- else:
- log("Unknown error from ceph config-get exists: {} {}".format(
- e.returncode, e.output))
- raise
-
-
-def get_erasure_profile(service, name):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param name:
- :return:
- """
- try:
- out = check_output(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name, '--format=json'])
- return json.loads(out)
- except (CalledProcessError, OSError, ValueError):
- return None
-
-
-def pool_set(service, pool_name, key, value):
- """
- Sets a value for a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param key: six.string_types
- :param value:
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def snapshot_pool(service, pool_name, snapshot_name):
- """
- Snapshots a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_pool_snapshot(service, pool_name, snapshot_name):
- """
- Remove a snapshot from a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param snapshot_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
- """
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param max_bytes: int or long
- :return: None. Can raise CalledProcessError
- """
- # Set a byte quota on a RADOS pool in ceph.
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
- 'max_bytes', str(max_bytes)]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_pool_quota(service, pool_name):
- """
- Set a byte quota on a RADOS pool in ceph.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def remove_erasure_profile(service, profile_name):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :return: None. Can raise CalledProcessError
- """
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
- profile_name]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
- failure_domain='host',
- data_chunks=2, coding_chunks=1,
- locality=None, durability_estimator=None):
- """
- Create a new erasure code profile if one does not already exist for it. Updates
- the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
- for more details
- :param service: six.string_types. The Ceph user name to run the command under
- :param profile_name: six.string_types
- :param erasure_plugin_name: six.string_types
- :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
- 'room', 'root', 'row'])
- :param data_chunks: int
- :param coding_chunks: int
- :param locality: int
- :param durability_estimator: int
- :return: None. Can raise CalledProcessError
- """
- # Ensure this failure_domain is allowed by Ceph
- validator(failure_domain, six.string_types,
- ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
-
- cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
- 'ruleset_failure_domain=' + failure_domain]
- if locality is not None and durability_estimator is not None:
- raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
-
- # Add plugin specific information
- if locality is not None:
- # For local erasure codes
- cmd.append('l=' + str(locality))
- if durability_estimator is not None:
- # For Shec erasure codes
- cmd.append('c=' + str(durability_estimator))
-
- if erasure_profile_exists(service, profile_name):
- cmd.append('--force')
-
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
-
-
-def rename_pool(service, old_name, new_name):
- """
- Rename a Ceph pool from old_name to new_name
- :param service: six.string_types. The Ceph user name to run the command under
- :param old_name: six.string_types
- :param new_name: six.string_types
- :return: None
- """
- validator(value=old_name, valid_type=six.string_types)
- validator(value=new_name, valid_type=six.string_types)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
- check_call(cmd)
-
-
-def erasure_profile_exists(service, name):
- """
- Check to see if an Erasure code profile already exists.
- :param service: six.string_types. The Ceph user name to run the command under
- :param name: six.string_types
- :return: int or None
- """
- validator(value=name, valid_type=six.string_types)
- try:
- check_call(['ceph', '--id', service,
- 'osd', 'erasure-code-profile', 'get',
- name])
- return True
- except CalledProcessError:
- return False
-
-
-def get_cache_mode(service, pool_name):
- """
- Find the current caching mode of the pool_name given.
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :return: int or None
- """
- validator(value=service, valid_type=six.string_types)
- validator(value=pool_name, valid_type=six.string_types)
- out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
- try:
- osd_json = json.loads(out)
- for pool in osd_json['pools']:
- if pool['pool_name'] == pool_name:
- return pool['cache_mode']
- return None
- except ValueError:
- raise
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out.split()
-
-
-def get_osds(service):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
- """
- version = ceph_version()
- if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
-
- return None
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def update_pool(client, pool, settings):
- cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
- for k, v in six.iteritems(settings):
- cmd.append(k)
- cmd.append(v)
-
- check_call(cmd)
-
-
-def create_pool(service, name, replicas=3, pg_num=None):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- if not pg_num:
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pg_num = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pg_num = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
- check_call(cmd)
-
- update_pool(service, name, settings={'size': str(replicas)})
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted ring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes(relation='ceph'):
- """Query named relation to determine current nodes."""
- hosts = []
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- create_keyring(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = os.errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
- blk_device, fstype, system_services=[],
- replicas=3):
- """NOTE: This function must only be called from a single service unit for
- the same rbd_img otherwise data loss will occur.
-
- Ensures given pool and RBD image exists, is mapped to a block device,
- and the device is formatted and mounted at the given mount_point.
-
- If formatting a device for the first time, data existing at mount_point
- will be migrated to the RBD device before being re-mounted.
-
- All services listed in system_services will be stopped prior to data
- migration and restarted when complete.
- """
- # Ensure pool, RBD image, RBD mappings are in place.
- if not pool_exists(service, pool):
- log('Creating new pool {}.'.format(pool), level=INFO)
- create_pool(service, pool, replicas=replicas)
-
- if not rbd_exists(service, pool, rbd_img):
- log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
- create_rbd_image(service, pool, rbd_img, sizemb)
-
- if not image_mapped(rbd_img):
- log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
- level=INFO)
- map_block_storage(service, pool, rbd_img)
-
- # make file system
- # TODO: What happens if for whatever reason this is run again and
- # the data is already in the rbd device and/or is mounted??
- # When it is mounted already, it will fail to make the fs
- # XXX: This is really sketchy! Need to at least add an fstab entry
- # otherwise this hook will blow away existing data if its executed
- # after a reboot.
- if not filesystem_mounted(mount_point):
- make_filesystem(blk_device, fstype)
-
- for svc in system_services:
- if service_running(svc):
- log('Stopping services {} prior to migrating data.'
- .format(svc), level=DEBUG)
- service_stop(svc)
-
- place_data_on_block_device(blk_device, mount_point)
-
- for svc in system_services:
- log('Starting service {} after migrating data.'
- .format(svc), level=DEBUG)
- service_start(svc)
-
-
-def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
- """Ensures a ceph keyring is created for a named service and optionally
- ensures user and group ownership.
-
- Returns False if no ceph key is available in relation state.
- """
- key = None
- for rid in relation_ids(relation):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
-
- if not key:
- return False
-
- create_keyring(service=service, key=key)
- keyring = _keyring_path(service)
- if user and group:
- check_call(['chown', '%s.%s' % (user, group), keyring])
-
- return True
-
-
-def ceph_version():
- """Retrieve the local version of ceph."""
- if os.path.exists('/usr/bin/ceph'):
- cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
- output = output.split()
- if len(output) > 3:
- return output[2]
- else:
- return None
- else:
- return None
-
-
-class CephBrokerRq(object):
- """Ceph broker request.
-
- Multiple operations can be added to a request and sent to the Ceph broker
- to be executed.
-
- Request is json-encoded for sending over the wire.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
- else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
-
- def add_op_create_pool(self, name, replica_count=3, pg_num=None):
- """Adds an operation to create a pool.
-
- @param pg_num setting: optional setting. If not provided, this value
- will be calculated by the broker based on how many OSDs are in the
- cluster at the time of creation. Note that, if provided, this value
- will be capped at the current available maximum.
- """
- self.ops.append({'op': 'create-pool', 'name': name,
- 'replicas': replica_count, 'pg_num': pg_num})
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op', 'pg_num']:
- if self.ops[req_no].get(key) != other.ops[req_no].get(key):
- return False
- else:
- return False
- return True
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.api_version == other.api_version and \
- self._ops_equal(other):
- return True
- else:
- return False
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identity which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <Request complete actions>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- @param rid: Relation id to query for request
- """
- request = None
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- request_data = json.loads(broker_req)
- request = CephBrokerRq(api_version=request_data['api-version'],
- request_id=request_data['request-id'])
- request.set_ops(request_data['ops'])
-
- return request
-
-
-def get_request_states(request, relation='ceph'):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
-
- @param request: A CephBrokerRq object
- """
- complete = []
- requests = {}
- for rid in relation_ids(relation):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similair request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request, relation='ceph'):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similair request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request, relation=relation)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
- log('Using legacy broker_rsp as remote service does not '
- 'supports unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request, relation='ceph'):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request, relation=relation):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids(relation):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index 3a3f514..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
- '''
- Parse through 'losetup -a' output to determine currently mapped
- loopback devices. Output is expected to look like:
-
- /dev/loop0: [0807]:961814 (/tmp/my.img)
-
- :returns: dict: a dict mapping {loopback_dev: backing_file}
- '''
- loopbacks = {}
- cmd = ['losetup', '-a']
- devs = [d.strip().split(' ') for d in
- check_output(cmd).splitlines() if d != '']
- for dev, _, f in devs:
- loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
- return loopbacks
-
-
-def create_loopback(file_path):
- '''
- Create a loopback device for a given backing file.
-
- :returns: str: Full path to new loopback device (eg, /dev/loop0)
- '''
- file_path = os.path.abspath(file_path)
- check_call(['losetup', '--find', file_path])
- for d, f in six.iteritems(loopback_devices()):
- if f == file_path:
- return d
-
-
-def ensure_loopback_device(path, size):
- '''
- Ensure a loopback device exists for a given backing file path and size.
- If it a loopback device is not mapped to file, a new one will be created.
-
- TODO: Confirm size of found loopback device.
-
- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
- '''
- for d, f in six.iteritems(loopback_devices()):
- if f == path:
- return d
-
- if not os.path.exists(path):
- cmd = ['truncate', '--size', size, path]
- check_call(cmd)
-
- return create_loopback(path)
-
-
-def is_mapped_loopback_device(device):
- """
- Checks if a given device name is an existing/mapped loopback device.
- :param device: str: Full path to the device (eg, /dev/loop1).
- :returns: str: Path to the backing file if is a loopback device
- empty string otherwise
- """
- return loopback_devices().get(device, "")
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index 34b5f71..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
- '''
- Deactivate any volume gruop associated with an LVM physical volume.
-
- :param block_device: str: Full path to LVM physical volume
- '''
- vg = list_lvm_volume_group(block_device)
- if vg:
- cmd = ['vgchange', '-an', vg]
- check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
- '''
- Determine whether a block device is initialized as an LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: boolean: True if block device is a PV, False if not.
- '''
- try:
- check_output(['pvdisplay', block_device])
- return True
- except CalledProcessError:
- return False
-
-
-def remove_lvm_physical_volume(block_device):
- '''
- Remove LVM PV signatures from a given block device.
-
- :param block_device: str: Full path of block device to scrub.
- '''
- p = Popen(['pvremove', '-ff', block_device],
- stdin=PIPE)
- p.communicate(input='y\n')
-
-
-def list_lvm_volume_group(block_device):
- '''
- List LVM volume group associated with a given block device.
-
- Assumes block device is a valid LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: str: Name of volume group associated with block device or None
- '''
- vg = None
- pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
- return vg
-
-
-def create_lvm_physical_volume(block_device):
- '''
- Initialize a block device as an LVM physical volume.
-
- :param block_device: str: Full path of block device to initialize.
-
- '''
- check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
- '''
- Create an LVM volume group backed by a given block device.
-
- Assumes block device has already been initialized as an LVM PV.
-
- :param volume_group: str: Name of volume group to create.
- :block_device: str: Full path of PV-initialized block device.
- '''
- check_call(['vgcreate', volume_group, block_device])
diff --git a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index 4e35c29..0000000
--- a/charms/trusty/ceilometer/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- check_call,
- check_output,
- call
-)
-
-
-def is_block_device(path):
- '''
- Confirm device at path is a valid block device node.
-
- :returns: boolean: True if path is a block device, False if not.
- '''
- if not os.path.exists(path):
- return False
- return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
- '''
- Clear a block device of partition table. Relies on sgdisk, which is
- installed as pat of the 'gdisk' package in Ubuntu.
-
- :param block_device: str: Full path of block device to clean.
- '''
- # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
- call(['sgdisk', '--zap-all', '--', block_device])
- call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
- dev_end = check_output(['blockdev', '--getsz',
- block_device]).decode('UTF-8')
- gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
- '''Given a device path, return True if that device is mounted, and False
- if it isn't.
-
- :param device: str: Full path of the device to check.
- :returns: boolean: True if the path represents a mounted device, False if
- it doesn't.
- '''
- try:
- out = check_output(['lsblk', '-P', device]).decode('UTF-8')
- except:
- return False
- return bool(re.search(r'MOUNTPOINT=".+"', out))
diff --git a/charms/trusty/ceilometer/charmhelpers/core/__init__.py b/charms/trusty/ceilometer/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/charmhelpers/core/decorators.py b/charms/trusty/ceilometer/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/ceilometer/charmhelpers/core/files.py b/charms/trusty/ceilometer/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/fstab.py b/charms/trusty/ceilometer/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/ceilometer/charmhelpers/core/hookenv.py b/charms/trusty/ceilometer/charmhelpers/core/hookenv.py
deleted file mode 100644
index 0132129..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1009 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-@cached
-def storage_list(storage_name=None):
- """List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
- if storage_name:
- _args.append(storage_name)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except OSError as e:
- import errno
- if e.errno == errno.ENOENT:
- # storage-list does not exist
- return []
- raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- @wraps(f)
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The <class> and <id> provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The <class> and <id> provided must match a payload that has been previously
- registered with juju using payload-register. The <status> must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- <name> must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation of extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- return subprocess.check_output(cmd).strip()
diff --git a/charms/trusty/ceilometer/charmhelpers/core/host.py b/charms/trusty/ceilometer/charmhelpers/core/host.py
deleted file mode 100644
index 64b2df5..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/host.py
+++ /dev/null
@@ -1,714 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = True
- if service_running(service_name):
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_running(service_name)
- if not started:
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name):
- """Determine whether a system service is running"""
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- if os.path.exists(_UPSTART_CONF.format(service_name)):
- try:
- output = subprocess.check_output(
- ['status', service_name],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- # This works for upstart scripts where the 'service' command
- # returns a consistent string to represent running 'start/running'
- if "start/running" in output:
- return True
- elif os.path.exists(_INIT_D_CONF.format(service_name)):
- # Check System V scripts init script return codes
- return service('status', service_name)
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False,
- primary_group=None, secondary_groups=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
-
-
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result of lambda_f()
- """
- if restart_functions is None:
- restart_functions = {}
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for service_name in services_list:
- if service_name in restart_functions:
- restart_functions[service_name](service_name)
- else:
- for action in actions:
- service(action, service_name)
- return r
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also Chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # Classic, not KiB.
- raise NotImplementedError()
diff --git a/charms/trusty/ceilometer/charmhelpers/core/hugepage.py b/charms/trusty/ceilometer/charmhelpers/core/hugepage.py
deleted file mode 100644
index a783ad9..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/ceilometer/charmhelpers/core/kernel.py b/charms/trusty/ceilometer/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/services/__init__.py b/charms/trusty/ceilometer/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/ceilometer/charmhelpers/core/services/base.py b/charms/trusty/ceilometer/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/ceilometer/charmhelpers/core/services/helpers.py b/charms/trusty/ceilometer/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 2423704..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,292 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/ceilometer/charmhelpers/core/strutils.py b/charms/trusty/ceilometer/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/sysctl.py b/charms/trusty/ceilometer/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/ceilometer/charmhelpers/core/templating.py b/charms/trusty/ceilometer/charmhelpers/core/templating.py
deleted file mode 100644
index d2d8eaf..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
diff --git a/charms/trusty/ceilometer/charmhelpers/core/unitdata.py b/charms/trusty/ceilometer/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/ceilometer/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/__init__.py b/charms/trusty/ceilometer/charmhelpers/fetch/__init__.py
deleted file mode 100644
index db0d86a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,464 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- log("Marking {} as {}".format(packages, mark))
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except NotImplementedError:
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py b/charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index b8e0943..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'wb') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py b/charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index cafd27f..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-from charmhelpers.core.host import mkdir
-
-
-if filter_installed_packages(['bzr']) != []:
- apt_install(['bzr'])
- if filter_installed_packages(['bzr']) != []:
- raise NotImplementedError('Unable to install bzr')
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.bzr'))
- else:
- return True
-
- def branch(self, source, dest):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if os.path.exists(dest):
- check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
- else:
- check_call(['bzr', 'branch', source, dest])
-
- def install(self, source, dest=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
-
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer/charmhelpers/fetch/giturl.py b/charms/trusty/ceilometer/charmhelpers/fetch/giturl.py
deleted file mode 100644
index 65ed531..0000000
--- a/charms/trusty/ceilometer/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from subprocess import check_call, CalledProcessError
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource,
- filter_installed_packages,
- apt_install,
-)
-
-if filter_installed_packages(['git']) != []:
- apt_install(['git'])
- if filter_installed_packages(['git']) != []:
- raise NotImplementedError('Unable to install git')
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git', ''):
- return False
- elif not url_parts.scheme:
- return os.path.exists(os.path.join(source, '.git'))
- else:
- return True
-
- def clone(self, source, dest, branch="master", depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if os.path.exists(dest):
- cmd = ['git', '-C', dest, 'pull', source, branch]
- else:
- cmd = ['git', 'clone', source, dest, '--branch', branch]
- if depth:
- cmd.extend(['--depth', depth])
- check_call(cmd)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- try:
- self.clone(source, dest_dir, branch, depth)
- except CalledProcessError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/ceilometer/charmhelpers/payload/__init__.py b/charms/trusty/ceilometer/charmhelpers/payload/__init__.py
deleted file mode 100644
index e6f4249..0000000
--- a/charms/trusty/ceilometer/charmhelpers/payload/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Tools for working with files injected into a charm just before deployment."
diff --git a/charms/trusty/ceilometer/charmhelpers/payload/execd.py b/charms/trusty/ceilometer/charmhelpers/payload/execd.py
deleted file mode 100644
index 4d4d81a..0000000
--- a/charms/trusty/ceilometer/charmhelpers/payload/execd.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-import subprocess
-from charmhelpers.core import hookenv
-
-
-def default_execd_dir():
- return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
-
-
-def execd_module_paths(execd_dir=None):
- """Generate a list of full paths to modules within execd_dir."""
- if not execd_dir:
- execd_dir = default_execd_dir()
-
- if not os.path.exists(execd_dir):
- return
-
- for subpath in os.listdir(execd_dir):
- module = os.path.join(execd_dir, subpath)
- if os.path.isdir(module):
- yield module
-
-
-def execd_submodule_paths(command, execd_dir=None):
- """Generate a list of full paths to the specified command within exec_dir.
- """
- for module_path in execd_module_paths(execd_dir):
- path = os.path.join(module_path, command)
- if os.access(path, os.X_OK) and os.path.isfile(path):
- yield path
-
-
-def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
- """Run command for each module within execd_dir which defines it."""
- for submodule_path in execd_submodule_paths(command, execd_dir):
- try:
- subprocess.check_call(submodule_path, shell=True, stderr=stderr)
- except subprocess.CalledProcessError as e:
- hookenv.log("Error ({}) running {}. Output: {}".format(
- e.returncode, e.cmd, e.output))
- if die_on_error:
- sys.exit(e.returncode)
-
-
-def execd_preinstall(execd_dir=None):
- """Run charm-pre-install for each module within execd_dir."""
- execd_run('charm-pre-install', execd_dir=execd_dir)
diff --git a/charms/trusty/ceilometer/config.yaml b/charms/trusty/ceilometer/config.yaml
deleted file mode 100644
index e97ad1c..0000000
--- a/charms/trusty/ceilometer/config.yaml
+++ /dev/null
@@ -1,194 +0,0 @@
-options:
- debug:
- default: False
- type: boolean
- description: Enable debug logging.
- verbose:
- default: False
- type: boolean
- description: Enable verbose logging.
- use-syslog:
- type: boolean
- default: False
- description: |
- Setting this to True will allow supporting services to log to syslog.
- openstack-origin:
- default: distro
- type: string
- description: |
- Repository from which to install. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported Cloud Archive release pocket.
-
- Supported Cloud Archive sources include:
-
- cloud:<series>-<openstack-release>
- cloud:<series>-<openstack-release>/updates
- cloud:<series>-<openstack-release>/staging
- cloud:<series>-<openstack-release>/proposed
-
- For series=Precise we support cloud archives for openstack-release:
- * icehouse
-
- For series=Trusty we support cloud archives for openstack-release:
- * juno
- * kilo
- * ...
-
- NOTE: updating this setting to a source that is known to provide
- a later version of OpenStack will trigger a software upgrade.
-
- NOTE: when openstack-origin-git is specified, openstack specific
- packages will be installed from source rather than from the
- openstack-origin repository.
- region:
- default: RegionOne
- type: string
- description: OpenStack Region
- rabbit-user:
- default: ceilometer
- type: string
- description: Username to request access on rabbitmq-server.
- rabbit-vhost:
- default: openstack
- type: string
- description: RabbitMQ virtual host to request access on rabbitmq-server.
- ssl_cert:
- type: string
- default:
- description: |
- SSL certificate to install and use for API ports. Setting this value
- and ssl_key will enable reverse proxying, point Ceilometer's entry in the
- Keystone catalog to use https, and override any certficiate and key
- issued by Keystone (if it is configured to do so).
- ssl_key:
- type: string
- default:
- description: SSL key to use with certificate specified as ssl_cert.
- ssl_ca:
- type: string
- default:
- description: |
- SSL CA to use with the certificate and key provided - this is only
- required if you are providing a privately signed ssl_cert and ssl_key.
- nagios_context:
- default: "juju"
- type: string
- description: |
- Used by the nrpe-external-master subordinate charm.
- A string that will be prepended to instance name to set the host name
- in nagios. So for instance the hostname would be something like:
- juju-myservice-0
- If you're running multiple environments with the same services in them
- this allows you to differentiate between them.
- nagios_servicegroups:
- default: ""
- type: string
- description: |
- A comma-separated list of nagios servicegroups.
- If left empty, the nagios_context will be used as the servicegroup
- # Network configuration options
- # by default all access is over 'private-address'
- os-admin-network:
- type: string
- default:
- description: |
- The IP address and netmask of the OpenStack Admin network (e.g.
- 192.168.0.0/24)
-
- This network will be used for admin endpoints.
- os-internal-network:
- type: string
- default:
- description: |
- The IP address and netmask of the OpenStack Internal network (e.g.
- 192.168.0.0/24)
-
- This network will be used for internal endpoints.
- os-public-network:
- type: string
- default:
- description: |
- The IP address and netmask of the OpenStack Public network (e.g.
- 192.168.0.0/24)
-
- This network will be used for public endpoints.
- os-public-hostname:
- type: string
- default:
- description: |
- The hostname or address of the public endpoints created for ceilometer
- in the keystone identity provider.
-
- This value will be used for public endpoints. For example, an
- os-public-hostname set to 'ceilometer.example.com' with ssl enabled will
- create the following public endpoints for ceilometer:
-
- https://ceilometer.example.com:8777/
- # HA configuration settings
- vip:
- type: string
- default:
- description: |
- Virtual IP(s) to use to front API services in HA configuration.
-
- If multiple networks are being used, a VIP should be provided for each
- network, separated by spaces.
- ha-bindiface:
- type: string
- default: eth0
- description: |
- Default network interface on which HA cluster will bind to communication
- with the other members of the HA Cluster.
- ha-mcastport:
- type: int
- default: 5403
- description: |
- Default multicast port number that will be used to communicate between
- HA Cluster nodes.
- api-workers:
- type: int
- default: 1
- description: |
- Number of workers for Ceilometer API server. (>= Kilo).
- action-managed-upgrade:
- type: boolean
- default: False
- description: |
- If True enables openstack upgrades for this charm via juju actions.
- You will still need to set openstack-origin to the new repository but
- instead of an upgrade running automatically across all units, it will
- wait for you to execute the openstack-upgrade action for this charm on
- each unit. If False it will revert to existing behavior of upgrading
- all units on config change.
- haproxy-server-timeout:
- type: int
- default:
- description: |
- Server timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 30000ms is used.
- haproxy-client-timeout:
- type: int
- default:
- description: |
- Client timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 30000ms is used.
- haproxy-queue-timeout:
- type: int
- default:
- description: |
- Queue timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 5000ms is used.
- haproxy-connect-timeout:
- type: int
- default:
- description: |
- Connect timeout configuration in ms for haproxy, used in HA
- configurations. If not provided, default value of 5000ms is used.
- harden:
- default:
- type: string
- description: |
- Apply system hardening. Supports a space-delimited list of modules
- to run. Supported modules currently include os, ssh, apache and mysql.
-
diff --git a/charms/trusty/ceilometer/copyright b/charms/trusty/ceilometer/copyright
deleted file mode 100644
index f65bac7..0000000
--- a/charms/trusty/ceilometer/copyright
+++ /dev/null
@@ -1,32 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2011, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Files: ocf/openstack/ceilometer-agent-central
-Copyright: Emilien Macchi
-License: Apache 2.0
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
diff --git a/charms/trusty/ceilometer/hardening.yaml b/charms/trusty/ceilometer/hardening.yaml
deleted file mode 100644
index 314bb38..0000000
--- a/charms/trusty/ceilometer/hardening.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-# Overrides file for contrib.hardening. See README.hardening in
-# contrib.hardening for info on how to use this file.
-ssh:
- server:
- use_pam: 'yes' # juju requires this
diff --git a/charms/trusty/ceilometer/hooks/amqp-relation-changed b/charms/trusty/ceilometer/hooks/amqp-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/amqp-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/amqp-relation-departed b/charms/trusty/ceilometer/hooks/amqp-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/amqp-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/amqp-relation-joined b/charms/trusty/ceilometer/hooks/amqp-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/amqp-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken b/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed b/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed b/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-plugin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined b/charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer-service-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer_contexts.py b/charms/trusty/ceilometer/hooks/ceilometer_contexts.py
deleted file mode 120000
index 6c03421..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer_contexts.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_contexts.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ceilometer_hooks.py b/charms/trusty/ceilometer/hooks/ceilometer_hooks.py
deleted file mode 100755
index 1fd861c..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer_hooks.py
+++ /dev/null
@@ -1,366 +0,0 @@
-#!/usr/bin/python
-import base64
-import shutil
-import subprocess
-import sys
-import os
-
-from charmhelpers.fetch import (
- apt_install, filter_installed_packages,
- apt_update
-)
-from charmhelpers.core.hookenv import (
- open_port,
- relation_get,
- relation_set,
- relation_ids,
- related_units,
- config,
- Hooks, UnregisteredHookError,
- log,
- status_set,
-)
-from charmhelpers.core.host import (
- service_restart,
- lsb_release
-)
-from charmhelpers.contrib.openstack.utils import (
- configure_installation_source,
- openstack_upgrade_available,
- pausable_restart_on_change as restart_on_change,
- is_unit_paused_set,
-)
-from ceilometer_utils import (
- get_packages,
- CEILOMETER_DB,
- CEILOMETER_SERVICE,
- CEILOMETER_ROLE,
- register_configs,
- restart_map,
- services,
- get_ceilometer_context,
- get_shared_secret,
- do_openstack_upgrade,
- set_shared_secret,
- assess_status,
- configure_pipeline,
-)
-from ceilometer_contexts import CEILOMETER_PORT
-from charmhelpers.contrib.openstack.ip import (
- canonical_url,
- PUBLIC, INTERNAL, ADMIN
-)
-from charmhelpers.contrib.charmsupport import nrpe
-from charmhelpers.contrib.network.ip import (
- get_iface_for_address,
- get_netmask_for_address
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- get_hacluster_config,
- is_elected_leader
-)
-from charmhelpers.contrib.peerstorage import (
- peer_retrieve,
- peer_store,
-)
-from charmhelpers.payload.execd import execd_preinstall
-from charmhelpers.contrib.hardening.harden import harden
-
-hooks = Hooks()
-CONFIGS = register_configs()
-
-
-@hooks.hook('install.real')
-@harden()
-def install():
- execd_preinstall()
- origin = config('openstack-origin')
- if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and origin == 'distro'):
- origin = 'cloud:precise-grizzly'
- configure_installation_source(origin)
- packages = filter_installed_packages(get_packages())
- if packages:
- status_set('maintenance', 'Installing packages')
- apt_update(fatal=True)
- apt_install(packages, fatal=True)
- open_port(CEILOMETER_PORT)
-
-
-@hooks.hook("amqp-relation-joined")
-def amqp_joined():
- relation_set(username=config('rabbit-user'),
- vhost=config('rabbit-vhost'))
-
-
-@hooks.hook("shared-db-relation-joined")
-def db_joined():
- relation_set(ceilometer_database=CEILOMETER_DB)
-
-
-@hooks.hook("amqp-relation-changed",
- "shared-db-relation-changed",
- "shared-db-relation-departed")
-@restart_on_change(restart_map())
-def any_changed():
- CONFIGS.write_all()
- configure_https()
- ceilometer_joined()
-
-
-@hooks.hook("identity-service-relation-changed")
-@restart_on_change(restart_map())
-def identity_service_relation_changed():
- CONFIGS.write_all()
- configure_https()
- keystone_joined()
- ceilometer_joined()
-
-
-@hooks.hook("amqp-relation-departed")
-@restart_on_change(restart_map())
-def amqp_departed():
- if 'amqp' not in CONFIGS.complete_contexts():
- log('amqp relation incomplete. Peer not ready?')
- return
- CONFIGS.write_all()
-
-
-def configure_https():
- """Enables SSL API Apache config if appropriate."""
- # need to write all to ensure changes to the entire request pipeline
- # propagate (c-api, haprxy, apache)
- CONFIGS.write_all()
- if 'https' in CONFIGS.complete_contexts():
- cmd = ['a2ensite', 'openstack_https_frontend']
- subprocess.check_call(cmd)
- else:
- cmd = ['a2dissite', 'openstack_https_frontend']
- subprocess.check_call(cmd)
-
- # TODO: improve this by checking if local CN certs are available
- # first then checking reload status (see LP #1433114).
- if not is_unit_paused_set():
- try:
- subprocess.check_call(['service', 'apache2', 'reload'])
- except subprocess.CalledProcessError:
- subprocess.call(['service', 'apache2', 'restart'])
-
-
-@hooks.hook('config-changed')
-@restart_on_change(restart_map())
-@harden()
-def config_changed():
- if not config('action-managed-upgrade'):
- if openstack_upgrade_available('ceilometer-common'):
- status_set('maintenance', 'Upgrading to new OpenStack release')
- do_openstack_upgrade(CONFIGS)
- update_nrpe_config()
- CONFIGS.write_all()
- ceilometer_joined()
- configure_https()
- for rid in relation_ids('identity-service'):
- keystone_joined(relid=rid)
-
-
-@hooks.hook('upgrade-charm')
-@harden()
-def upgrade_charm():
- install()
- update_nrpe_config()
- any_changed()
-
-
-def install_ceilometer_ocf():
- dest_file = "/usr/lib/ocf/resource.d/openstack/ceilometer-agent-central"
- src_file = 'ocf/openstack/ceilometer-agent-central'
-
- if not os.path.isdir(os.path.dirname(dest_file)):
- os.makedirs(os.path.dirname(dest_file))
- if not os.path.exists(dest_file):
- shutil.copy(src_file, dest_file)
-
-
-@hooks.hook('cluster-relation-joined')
-@restart_on_change(restart_map(), stopstart=True)
-def cluster_joined():
- install_ceilometer_ocf()
-
- # If this node is the elected leader then share our secret with other nodes
- if is_elected_leader('grp_ceilometer_vips'):
- peer_store('shared_secret', get_shared_secret())
-
- CONFIGS.write_all()
-
-
-@hooks.hook('cluster-relation-changed',
- 'cluster-relation-departed')
-@restart_on_change(restart_map(), stopstart=True)
-def cluster_changed():
- shared_secret = peer_retrieve('shared_secret')
- if shared_secret is None or shared_secret.strip() == '':
- log('waiting for shared secret to be provided by leader')
- elif not shared_secret == get_shared_secret():
- set_shared_secret(shared_secret)
-
- CONFIGS.write_all()
-
-
-@hooks.hook('ha-relation-joined')
-def ha_joined():
- cluster_config = get_hacluster_config()
-
- resources = {
- 'res_ceilometer_haproxy': 'lsb:haproxy',
- 'res_ceilometer_agent_central': ('ocf:openstack:'
- 'ceilometer-agent-central')
- }
-
- resource_params = {
- 'res_ceilometer_haproxy': 'op monitor interval="5s"',
- 'res_ceilometer_agent_central': 'op monitor interval="30s"'
- }
-
- amqp_ssl_port = None
- for rel_id in relation_ids('amqp'):
- for unit in related_units(rel_id):
- amqp_ssl_port = relation_get('ssl_port', unit, rel_id)
-
- if amqp_ssl_port:
- params = ('params amqp_server_port="%s" op monitor interval="30s"' %
- (amqp_ssl_port))
- resource_params['res_ceilometer_agent_central'] = params
-
- vip_group = []
- for vip in cluster_config['vip'].split():
- res_ceilometer_vip = 'ocf:heartbeat:IPaddr2'
- vip_params = 'ip'
-
- iface = get_iface_for_address(vip)
- if iface is not None:
- vip_key = 'res_ceilometer_{}_vip'.format(iface)
- resources[vip_key] = res_ceilometer_vip
- resource_params[vip_key] = (
- 'params {ip}="{vip}" cidr_netmask="{netmask}"'
- ' nic="{iface}"'.format(ip=vip_params,
- vip=vip,
- iface=iface,
- netmask=get_netmask_for_address(vip))
- )
- vip_group.append(vip_key)
-
- if len(vip_group) >= 1:
- relation_set(groups={'grp_ceilometer_vips': ' '.join(vip_group)})
-
- init_services = {
- 'res_ceilometer_haproxy': 'haproxy'
- }
- clones = {
- 'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'
- }
- relation_set(init_services=init_services,
- corosync_bindiface=cluster_config['ha-bindiface'],
- corosync_mcastport=cluster_config['ha-mcastport'],
- resources=resources,
- resource_params=resource_params,
- clones=clones)
-
-
-@hooks.hook('ha-relation-changed')
-def ha_changed():
- clustered = relation_get('clustered')
- if not clustered or clustered in [None, 'None', '']:
- log('ha_changed: hacluster subordinate not fully clustered.')
- else:
- log('Cluster configured, notifying other services and updating '
- 'keystone endpoint configuration')
- for rid in relation_ids('identity-service'):
- keystone_joined(relid=rid)
-
-
-@hooks.hook("identity-service-relation-joined")
-def keystone_joined(relid=None):
- public_url = "{}:{}".format(
- canonical_url(CONFIGS, PUBLIC),
- CEILOMETER_PORT
- )
- admin_url = "{}:{}".format(
- canonical_url(CONFIGS, ADMIN),
- CEILOMETER_PORT
- )
- internal_url = "{}:{}".format(
- canonical_url(CONFIGS, INTERNAL),
- CEILOMETER_PORT
- )
- region = config("region")
- relation_set(relation_id=relid,
- service=CEILOMETER_SERVICE,
- public_url=public_url,
- admin_url=admin_url,
- internal_url=internal_url,
- requested_roles=CEILOMETER_ROLE,
- region=region)
-
-
-@hooks.hook('identity-notifications-relation-changed')
-def identity_notifications_changed():
- """Receive notifications from keystone."""
- notifications = relation_get()
- if not notifications:
- return
-
- # Some ceilometer services will create a client and request
- # the service catalog from keystone on startup. So if
- # endpoints change we need to restart these services.
- key = '%s-endpoint-changed' % (CEILOMETER_SERVICE)
- if key in notifications:
- service_restart('ceilometer-alarm-evaluator')
- service_restart('ceilometer-alarm-notifier')
-
-
-@hooks.hook("ceilometer-service-relation-joined")
-def ceilometer_joined():
- # Pass local context data onto related agent services
- context = get_ceilometer_context()
- # This value gets tranformed to a path by the context we need to
- # pass the data to agents.
- if 'rabbit_ssl_ca' in context:
- with open(context['rabbit_ssl_ca']) as fh:
- context['rabbit_ssl_ca'] = base64.b64encode(fh.read())
- for relid in relation_ids('ceilometer-service'):
- relation_set(relid, context)
-
-
-@hooks.hook('nrpe-external-master-relation-joined',
- 'nrpe-external-master-relation-changed')
-def update_nrpe_config():
- # python-dbus is used by check_upstart_job
- apt_install('python-dbus')
- hostname = nrpe.get_nagios_hostname()
- current_unit = nrpe.get_nagios_unit_name()
- nrpe_setup = nrpe.NRPE(hostname=hostname)
- nrpe.copy_nrpe_checks()
- nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
- nrpe.add_haproxy_checks(nrpe_setup, current_unit)
- nrpe_setup.write()
-
-
-@hooks.hook('update-status')
-@harden()
-def update_status():
- log('Updating status.')
-
-
-@hooks.hook('ceilometer-plugin-relation-changed')
-@hooks.hook('ceilometer-plugin-relation-departed')
-@hooks.hook('ceilometer-plugin-relation-broken')
-@restart_on_change(restart_map())
-def ceilometer_plugin_relation():
- configure_pipeline()
-
-
-if __name__ == '__main__':
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log('Unknown hook {} - skipping.'.format(e))
- assess_status(CONFIGS)
diff --git a/charms/trusty/ceilometer/hooks/ceilometer_utils.py b/charms/trusty/ceilometer/hooks/ceilometer_utils.py
deleted file mode 120000
index e333253..0000000
--- a/charms/trusty/ceilometer/hooks/ceilometer_utils.py
+++ /dev/null
@@ -1 +0,0 @@
-../lib/ceilometer_utils.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/charmhelpers b/charms/trusty/ceilometer/hooks/charmhelpers
deleted file mode 120000
index 702de73..0000000
--- a/charms/trusty/ceilometer/hooks/charmhelpers
+++ /dev/null
@@ -1 +0,0 @@
-../charmhelpers \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/cluster-relation-changed b/charms/trusty/ceilometer/hooks/cluster-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/cluster-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/cluster-relation-departed b/charms/trusty/ceilometer/hooks/cluster-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/cluster-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/cluster-relation-joined b/charms/trusty/ceilometer/hooks/cluster-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/cluster-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/config-changed b/charms/trusty/ceilometer/hooks/config-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ha-relation-changed b/charms/trusty/ceilometer/hooks/ha-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ha-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/ha-relation-joined b/charms/trusty/ceilometer/hooks/ha-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/ha-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/identity-notifications-relation-changed b/charms/trusty/ceilometer/hooks/identity-notifications-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/identity-notifications-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/identity-service-relation-changed b/charms/trusty/ceilometer/hooks/identity-service-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/identity-service-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/identity-service-relation-joined b/charms/trusty/ceilometer/hooks/identity-service-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/identity-service-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/install b/charms/trusty/ceilometer/hooks/install
deleted file mode 100755
index 83a9d3c..0000000
--- a/charms/trusty/ceilometer/hooks/install
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-# Wrapper to deal with newer Ubuntu versions that don't have py2 installed
-# by default.
-
-declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml')
-
-check_and_install() {
- pkg="${1}-${2}"
- if ! dpkg -s ${pkg} 2>&1 > /dev/null; then
- apt-get -y install ${pkg}
- fi
-}
-
-PYTHON="python"
-
-for dep in ${DEPS[@]}; do
- check_and_install ${PYTHON} ${dep}
-done
-
-exec ./hooks/install.real
diff --git a/charms/trusty/ceilometer/hooks/install.real b/charms/trusty/ceilometer/hooks/install.real
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/install.real
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed b/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined b/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/nrpe-external-master-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/shared-db-relation-changed b/charms/trusty/ceilometer/hooks/shared-db-relation-changed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/shared-db-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/shared-db-relation-departed b/charms/trusty/ceilometer/hooks/shared-db-relation-departed
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/shared-db-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/shared-db-relation-joined b/charms/trusty/ceilometer/hooks/shared-db-relation-joined
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/shared-db-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/start b/charms/trusty/ceilometer/hooks/start
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/stop b/charms/trusty/ceilometer/hooks/stop
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/update-status b/charms/trusty/ceilometer/hooks/update-status
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/update-status
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/hooks/upgrade-charm b/charms/trusty/ceilometer/hooks/upgrade-charm
deleted file mode 120000
index c948469..0000000
--- a/charms/trusty/ceilometer/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-ceilometer_hooks.py \ No newline at end of file
diff --git a/charms/trusty/ceilometer/icon.svg b/charms/trusty/ceilometer/icon.svg
deleted file mode 100644
index 84de61c..0000000
--- a/charms/trusty/ceilometer/icon.svg
+++ /dev/null
@@ -1,717 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- sodipodi:docname="openstack-ceilometer.svg"
- inkscape:version="0.48+devel r12825"
- version="1.1"
- id="svg6517"
- height="96"
- width="96">
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="2.7214607"
- inkscape:cy="63.792857"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="false"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1029"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="false"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false"
- inkscape:snap-global="true"
- inkscape:snap-bbox="true"
- inkscape:bbox-paths="true"
- inkscape:bbox-nodes="true"
- inkscape:snap-bbox-edge-midpoints="true"
- inkscape:snap-bbox-midpoints="true"
- inkscape:object-paths="true"
- inkscape:snap-intersection-paths="true"
- inkscape:object-nodes="true"
- inkscape:snap-smooth-nodes="true"
- inkscape:snap-midpoints="true"
- inkscape:snap-object-midpoints="true"
- inkscape:snap-center="true"
- inkscape:snap-grids="false"
- inkscape:snap-nodes="true"
- inkscape:snap-others="true">
- <inkscape:grid
- id="grid821"
- type="xygrid" />
- <sodipodi:guide
- id="guide823"
- position="18.34962,45.78585"
- orientation="1,0" />
- <sodipodi:guide
- id="guide827"
- position="78.02001,46.32673"
- orientation="1,0" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4184"
- position="65.586619,19.307"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4188"
- position="62.756032,71.583147"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4190"
- position="47.812194,78.049658"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- id="guide4194"
- position="25.60516,42.21665"
- orientation="1,0" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4202"
- position="25.60516,42.070975"
- orientation="-0.087155743,0.9961947" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4204"
- position="25.60516,42.070975"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4242"
- position="51.81985,44.36226"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4252"
- position="73.5625,75.210937"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- inkscape:color="rgb(140,140,240)"
- id="guide4254"
- position="18.34962,75.472017"
- orientation="-0.70710678,-0.70710678" />
- <sodipodi:guide
- inkscape:label=""
- id="guide4288"
- position="21.871042,21.577512"
- orientation="-0.70710678,-0.70710678" />
- </sodipodi:namedview>
- <defs
- id="defs6519">
- <filter
- id="filter1121"
- inkscape:label="Inner Shadow"
- style="color-interpolation-filters:sRGB;">
- <feFlood
- id="feFlood1123"
- result="flood"
- flood-color="rgb(0,0,0)"
- flood-opacity="0.59999999999999998" />
- <feComposite
- id="feComposite1125"
- result="composite1"
- operator="out"
- in2="SourceGraphic"
- in="flood" />
- <feGaussianBlur
- id="feGaussianBlur1127"
- result="blur"
- stdDeviation="1"
- in="composite1" />
- <feOffset
- id="feOffset1129"
- result="offset"
- dy="2"
- dx="0" />
- <feComposite
- id="feComposite1131"
- result="composite2"
- operator="atop"
- in2="SourceGraphic"
- in="offset" />
- </filter>
- <filter
- id="filter950"
- inkscape:label="Drop Shadow"
- style="color-interpolation-filters:sRGB;">
- <feFlood
- id="feFlood952"
- result="flood"
- flood-color="rgb(0,0,0)"
- flood-opacity="0.25" />
- <feComposite
- id="feComposite954"
- result="composite1"
- operator="in"
- in2="SourceGraphic"
- in="flood" />
- <feGaussianBlur
- id="feGaussianBlur956"
- result="blur"
- stdDeviation="1"
- in="composite1" />
- <feOffset
- id="feOffset958"
- result="offset"
- dy="1"
- dx="0" />
- <feComposite
- id="feComposite960"
- result="composite2"
- operator="over"
- in2="offset"
- in="SourceGraphic" />
- </filter>
- <filter
- inkscape:label="Badge Shadow"
- id="filter891"
- inkscape:collect="always">
- <feGaussianBlur
- id="feGaussianBlur893"
- stdDeviation="0.71999962"
- inkscape:collect="always" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter3831">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.86309522"
- id="feGaussianBlur3833" />
- </filter>
- <filter
- inkscape:collect="always"
- id="filter3868"
- x="-0.17186206"
- width="1.3437241"
- y="-0.1643077"
- height="1.3286154">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.62628186"
- id="feGaussianBlur3870" />
- </filter>
- <linearGradient
- id="linearGradient4328"
- inkscape:collect="always">
- <stop
- id="stop4330"
- offset="0"
- style="stop-color:#871f1c;stop-opacity:1;" />
- <stop
- id="stop4332"
- offset="1"
- style="stop-color:#651715;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- id="linearGradient902"
- inkscape:collect="always">
- <stop
- id="stop904"
- offset="0"
- style="stop-color:#cccccc;stop-opacity:1" />
- <stop
- id="stop906"
- offset="1"
- style="stop-color:#e6e6e6;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- id="Background">
- <stop
- style="stop-color:#22779e;stop-opacity:1"
- offset="0"
- id="stop4178" />
- <stop
- style="stop-color:#2991c0;stop-opacity:1"
- offset="1"
- id="stop4180" />
- </linearGradient>
- <clipPath
- id="clipPath873"
- clipPathUnits="userSpaceOnUse">
- <g
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- inkscape:label="Layer 1"
- id="g875"
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)">
- <path
- sodipodi:nodetypes="sssssssss"
- inkscape:connector-curvature="0"
- id="path877"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline" />
- </g>
- </clipPath>
- <style
- type="text/css"
- id="style867">
- .fil0 {fill:#1F1A17}
- </style>
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="635.29077"
- x2="-220"
- y1="731.29077"
- x1="-220"
- id="linearGradient908"
- xlink:href="#linearGradient902"
- inkscape:collect="always" />
- <clipPath
- id="clipPath16">
- <path
- d="m -9,-9 614,0 0,231 -614,0 0,-231 z"
- id="path18" />
- </clipPath>
- <clipPath
- id="clipPath116">
- <path
- d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
- id="path118" />
- </clipPath>
- <clipPath
- id="clipPath128">
- <path
- d="m 91.7368,146.3253 -9.7039,-1.577 -8.8548,-3.8814 -7.5206,-4.7308 -7.1566,-8.7335 -4.0431,-4.282 -3.9093,-1.4409 -1.034,2.5271 1.8079,2.6096 0.4062,3.6802 1.211,-0.0488 1.3232,-1.2069 -0.3569,3.7488 -1.4667,0.9839 0.0445,1.4286 -3.4744,-1.9655 -3.1462,-3.712 -0.6559,-3.3176 1.3453,-2.6567 1.2549,-4.5133 2.5521,-1.2084 2.6847,0.1318 2.5455,1.4791 -1.698,-8.6122 1.698,-9.5825 -1.8692,-4.4246 -6.1223,-6.5965 1.0885,-3.941 2.9002,-4.5669 5.4688,-3.8486 2.9007,-0.3969 3.225,-0.1094 -2.012,-8.2601 7.3993,-3.0326 9.2188,-1.2129 3.1535,2.0619 0.2427,5.5797 3.5178,5.8224 0.2426,4.6094 8.4909,-0.6066 7.8843,0.7279 -7.8843,-4.7307 1.3343,-5.701 4.9731,-7.763 4.8521,-2.0622 3.8814,1.5769 1.577,3.1538 8.1269,6.1861 1.5769,-1.3343 12.7363,-0.485 2.5473,2.0619 0.2426,3.6391 -0.849,1.5767 -0.6066,9.8251 -4.2454,8.4909 0.7276,3.7605 2.5475,-1.3343 7.1566,-6.6716 3.5175,-0.2424 3.8815,1.5769 3.8818,2.9109 1.9406,6.3077 11.4021,-0.7277 6.914,2.6686 5.5797,5.2157 4.0028,7.5206 0.9706,8.8546 -0.8493,10.3105 -2.1832,9.2185 -2.1836,2.9112 -3.0322,0.9706 -5.3373,-5.8224 -4.8518,-1.6982 -4.2455,7.0353 -4.2454,3.8815 -2.3049,1.4556 -9.2185,7.6419 -7.3993,4.0028 -7.3993,0.6066 -8.6119,-1.4556 -7.5206,-2.7899 -5.2158,-4.2454 -4.1241,-4.9734 -4.2454,-1.2129"
- id="path130" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- id="linearGradient3850">
- <stop
- style="stop-color:#000000;stop-opacity:1;"
- offset="0"
- id="stop3852" />
- <stop
- style="stop-color:#000000;stop-opacity:0;"
- offset="1"
- id="stop3854" />
- </linearGradient>
- <clipPath
- id="clipPath3095"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3097"
- d="m 976.648,389.551 -842.402,0 0,839.999 842.402,0 0,-839.999" />
- </clipPath>
- <clipPath
- id="clipPath3195"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3197"
- d="m 611.836,756.738 -106.34,105.207 c -8.473,8.289 -13.617,20.102 -13.598,33.379 L 598.301,790.207 c -0.031,-13.418 5.094,-25.031 13.535,-33.469" />
- </clipPath>
- <clipPath
- id="clipPath3235"
- clipPathUnits="userSpaceOnUse">
- <path
- inkscape:connector-curvature="0"
- id="path3237"
- d="m 1095.64,1501.81 c 35.46,-35.07 70.89,-70.11 106.35,-105.17 4.4,-4.38 7.11,-10.53 7.11,-17.55 l -106.37,105.21 c 0,7 -2.71,13.11 -7.09,17.51" />
- </clipPath>
- <linearGradient
- inkscape:collect="always"
- id="linearGradient4389">
- <stop
- style="stop-color:#871f1c;stop-opacity:1"
- offset="0"
- id="stop4391" />
- <stop
- style="stop-color:#c42e24;stop-opacity:1"
- offset="1"
- id="stop4393" />
- </linearGradient>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath4591">
- <path
- id="path4593"
- style="fill:#ff00ff;fill-opacity:1;fill-rule:nonzero;stroke:none"
- d="m 1106.6009,730.43734 -0.036,21.648 c -0.01,3.50825 -2.8675,6.61375 -6.4037,6.92525 l -83.6503,7.33162 c -3.5205,0.30763 -6.3812,-2.29987 -6.3671,-5.8145 l 0.036,-21.6475 20.1171,-1.76662 -0.011,4.63775 c 0,1.83937 1.4844,3.19925 3.3262,3.0395 l 49.5274,-4.33975 c 1.8425,-0.166 3.3425,-1.78125 3.3538,-3.626 l 0.01,-4.63025 20.1,-1.7575"
- inkscape:connector-curvature="0" />
- </clipPath>
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="radialGradient3856"
- cx="-26.508606"
- cy="93.399292"
- fx="-26.508606"
- fy="93.399292"
- r="20.40658"
- gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
- gradientUnits="userSpaceOnUse" />
- <filter
- inkscape:collect="always"
- id="filter3885">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="5.7442192"
- id="feGaussianBlur3887" />
- </filter>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="linearGradient3895"
- x1="348.20132"
- y1="593.11615"
- x2="-51.879555"
- y2="993.19702"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-318.48033,212.32022)" />
- <radialGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="radialGradient3902"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(-1.4333926,-2.2742838,1.1731823,-0.73941125,-174.08025,98.374394)"
- cx="-26.508606"
- cy="93.399292"
- fx="-26.508606"
- fy="93.399292"
- r="20.40658" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient3850"
- id="linearGradient3904"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(-318.48033,212.32022)"
- x1="348.20132"
- y1="593.11615"
- x2="-51.879555"
- y2="993.19702" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="23.383789"
- x2="25.217773"
- y1="27.095703"
- x1="21.505859"
- id="linearGradient4318"
- xlink:href="#linearGradient4389"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="20.884073"
- x2="71.960243"
- y1="20.041777"
- x1="72.802544"
- id="linearGradient4326"
- xlink:href="#linearGradient4389"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="74.246689"
- x2="21.69179"
- y1="73.643555"
- x1="22.294922"
- id="linearGradient4334"
- xlink:href="#linearGradient4328"
- inkscape:collect="always" />
- <linearGradient
- gradientUnits="userSpaceOnUse"
- y2="24.881023"
- x2="57.450542"
- y1="77.404816"
- x1="57.450542"
- id="linearGradient4319"
- xlink:href="#linearGradient4552"
- inkscape:collect="always" />
- <linearGradient
- id="linearGradient4552"
- inkscape:collect="always">
- <stop
- id="stop4554"
- offset="0"
- style="stop-color:#d93023;stop-opacity:1" />
- <stop
- id="stop4556"
- offset="1"
- style="stop-color:#e63f46;stop-opacity:1" />
- </linearGradient>
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4444"
- x1="-228.90239"
- y1="694.04291"
- x2="-223.99701"
- y2="687.45367"
- gradientUnits="userSpaceOnUse" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4469"
- x1="-216.46823"
- y1="693.81781"
- x2="-210.73438"
- y2="687.75952"
- gradientUnits="userSpaceOnUse" />
- <linearGradient
- inkscape:collect="always"
- xlink:href="#linearGradient4389"
- id="linearGradient4479"
- x1="-206.06966"
- y1="682.03033"
- x2="-199.5918"
- y2="675.95483"
- gradientUnits="userSpaceOnUse" />
- </defs>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- style="display:inline"
- transform="translate(268,-635.29076)"
- id="layer1"
- inkscape:groupmode="layer"
- inkscape:label="BACKGROUND">
- <path
- sodipodi:nodetypes="sssssssss"
- inkscape:connector-curvature="0"
- id="path6455"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- style="fill:url(#linearGradient908);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)" />
- <g
- id="g4336">
- <g
- transform="matrix(0.06790711,0,0,-0.06790711,-239.0411,765.68623)"
- id="g3897"
- xml:space="default">
- <path
- inkscape:connector-curvature="0"
- style="opacity:0.7;color:#000000;fill:url(#radialGradient3902);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3831);enable-background:accumulate"
- d="m -48.09375,67.8125 c -0.873996,-0.0028 -2.089735,0.01993 -3.40625,0.09375 -2.633031,0.147647 -5.700107,0.471759 -7.78125,1.53125 a 1.0001,1.0001 0 0 0 -0.25,1.59375 L -38.8125,92.375 a 1.0001,1.0001 0 0 0 0.84375,0.3125 L -24,90.5625 a 1.0001,1.0001 0 0 0 0.53125,-1.71875 L -46.0625,68.125 a 1.0001,1.0001 0 0 0 -0.625,-0.28125 c 0,0 -0.532254,-0.02842 -1.40625,-0.03125 z"
- transform="matrix(10.616011,0,0,-10.616011,357.98166,1725.8152)"
- id="path3821"
- xml:space="default" />
- <path
- style="opacity:0.6;color:#000000;fill:none;stroke:#000000;stroke-width:2.77429962;stroke-linecap:round;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3868);enable-background:accumulate"
- d="m -15.782705,81.725197 8.7458304,9.147937"
- id="path3858"
- inkscape:connector-curvature="0"
- transform="matrix(10.616011,0,0,-10.616011,39.50133,1725.8152)"
- xml:space="default" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;fill:url(#linearGradient3904);fill-opacity:1;stroke:none;stroke-width:2;marker:none;visibility:visible;display:inline;overflow:visible;filter:url(#filter3885);enable-background:accumulate;font-family:Sans;-inkscape-font-specification:Sans"
- d="m -95.18931,981.03569 a 10.617073,10.617073 0 0 1 -0.995251,-0.3318 l -42.795789,-5.308 a 10.617073,10.617073 0 0 1 -6.30326,-17.9145 L -4.2897203,812.5065 a 10.617073,10.617073 0 0 1 8.95726,-3.3175 l 49.0990503,7.63026 a 10.617073,10.617073 0 0 1 5.97151,17.91452 L -87.55905,978.04989 a 10.617073,10.617073 0 0 1 -7.63026,2.9858 z"
- id="path3874"
- inkscape:connector-curvature="0"
- xml:space="default" />
- </g>
- <path
- style="opacity:1;color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="M 20.697266 20.515625 C 19.336871 21.10204 18.348875 22.456253 18.345703 23.970703 L 18.351562 58.322266 L 28.798828 49.138672 C 28.677618 48.755492 28.614281 48.351433 28.613281 47.939453 C 28.613261 46.832713 29.052994 45.731919 29.833984 44.880859 C 30.614994 44.029779 31.672894 43.497091 32.777344 43.400391 C 34.625174 43.240001 36.250631 44.319081 36.769531 46.050781 L 42.583984 46.052734 C 42.966392 45.246167 43.574155 44.582164 44.298828 44.115234 L 20.697266 20.515625 z M 36.501953 49.099609 C 35.800103 50.580079 34.357634 51.603391 32.777344 51.744141 C 32.038304 51.807991 31.313171 51.674389 30.675781 51.355469 L 18.351562 62.191406 L 18.353516 69.601562 C 18.349848 70.477025 18.685456 71.239319 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.660156 79.126953 L 33.488281 71.738281 L 67.28125 68.777344 C 67.744386 68.736546 68.184049 68.603285 68.587891 68.404297 L 49.771484 49.589844 C 49.024593 50.774607 47.754946 51.625037 46.310547 51.751953 C 44.461497 51.913663 42.833613 50.834232 42.314453 49.101562 L 36.501953 49.099609 z "
- transform="translate(-268,635.29076)"
- id="path4308" />
- <path
- style="opacity:1;color:#000000;fill:#c42e24;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:0.1;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="M 67.320312 16.253906 L 21.822266 20.212891 C 21.426436 20.248017 21.052174 20.362637 20.697266 20.515625 L 44.298828 44.115234 C 44.9049 43.724723 45.592393 43.470157 46.3125 43.40625 C 46.79566 43.36596 47.274906 43.410199 47.728516 43.537109 L 53.324219 36.660156 C 53.017769 36.094616 52.857922 35.452456 52.857422 34.785156 C 52.857752 32.480326 54.723287 30.446511 57.023438 30.244141 C 59.119587 30.062831 60.885597 31.472453 61.148438 33.533203 L 70.771484 35.117188 L 70.771484 38.248047 L 70.775391 31.386719 L 77.232422 24.398438 L 69.892578 17.179688 L 69.884766 17.189453 C 69.251763 16.542736 68.342666 16.171306 67.320312 16.253906 z M 70.771484 38.248047 L 60.412109 36.541016 C 59.630809 37.708426 58.367804 38.472897 57.021484 38.591797 C 56.537844 38.632787 56.057726 38.589411 55.603516 38.462891 L 50.007812 45.337891 C 50.314462 45.903801 50.474339 46.547144 50.474609 47.214844 C 50.474197 48.071259 50.213409 48.888836 49.771484 49.589844 L 68.587891 68.404297 C 69.859183 67.777881 70.75673 66.462035 70.759766 65.015625 L 70.771484 38.248047 z "
- transform="translate(-268,635.29076)"
- id="path4233" />
- <rect
- xml:space="default"
- y="648.49109"
- x="-258.70667"
- height="69.20665"
- width="69.20665"
- id="rect3585-3"
- style="opacity:0.8;color:#000000;fill:none;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4318);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:5.25;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
- d="M 22.029297 20.195312 L 21.822266 20.212891 C 19.919838 20.381715 18.370776 22.043134 18.349609 23.939453 L 24.662109 30.251953 L 25.605469 31.195312 L 25.605469 31.103516 C 25.609469 29.193966 27.168951 27.515473 29.082031 27.345703 L 29.171875 27.337891 L 28.373047 26.539062 L 22.029297 20.195312 z "
- transform="translate(-268,635.29076)"
- id="path4256" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.53600003;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4326);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:2.4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
- d="M 67.330078 16.253906 L 68.03125 16.955078 L 74.472656 23.396484 L 74.580078 23.386719 C 75.531927 23.309814 76.390588 23.620657 77.015625 24.185547 L 69.892578 17.179688 L 69.884766 17.189453 C 69.253843 16.544862 68.348328 16.174551 67.330078 16.253906 z M 77.054688 24.222656 C 77.115589 24.279686 77.164628 24.348282 77.220703 24.410156 L 77.232422 24.398438 L 77.054688 24.222656 z "
- transform="translate(-268,635.29076)"
- id="path4272" />
- <path
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:1;color:#000000;color-interpolation:sRGB;color-interpolation-filters:sRGB;fill:url(#linearGradient4334);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:1.7;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif;stroke-miterlimit:4;stroke-dasharray:none"
- d="M 18.363281 69.712891 C 18.387957 70.540342 18.709001 71.264013 19.222656 71.802734 L 19.212891 71.8125 L 19.357422 71.955078 C 19.360505 71.957909 19.364093 71.960073 19.367188 71.962891 L 26.599609 79.068359 C 26.044831 78.550125 25.698241 77.821152 25.638672 76.988281 L 18.951172 70.298828 L 18.363281 69.712891 z M 26.636719 79.103516 L 26.660156 79.126953 L 26.664062 79.123047 C 26.655656 79.11562 26.645042 79.111033 26.636719 79.103516 z "
- transform="translate(-268,635.29076)"
- id="path4290" />
- <path
- style="opacity:1;color:#000000;fill:#96231e;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -249.52901,697.37716 7.14034,7.23587 12.32422,-10.83594 -7.25977,-7.13086 z"
- id="path4428"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#871f1c;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -249.64844,693.61303 7.25977,7.13085 5.06445,-14.09765 -7.65515,-5.41781 z"
- id="path4426"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#96231e;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -231.49805,684.39037 7.25977,7.13086 5.8125,0.002 -7.25977,-7.13086 z"
- id="path4430"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4469);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -225.68555,684.39232 7.25977,7.13091 -0.51715,5.74927 8.04214,0.24126 4.42318,-7.15751 -4.25676,-2.59674 -7.25782,-7.13086 z"
- id="path4446"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#9d241f;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -210.73437,687.75951 -7.25782,-7.13086 5.59571,-6.875 7.25976,7.13086 z"
- id="path4432"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:#9d241f;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -207.58789,671.83178 7.25781,7.13281 10.36133,1.70508 -7.25977,-7.13086 z"
- id="path4434"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4444);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -231.49805,684.39037 7.25977,7.13086 -0.0125,5.42958 -5.81371,-3.17372 -7.25977,-7.13086 z"
- id="path4436"
- inkscape:connector-curvature="0" />
- <path
- style="color:#000000;fill:url(#linearGradient4479);fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- d="m -205.13672,680.88451 -7.25976,-7.13086 4.80859,-1.92187 7.25781,7.13281 -0.59845,5.11879 z"
- id="path4471"
- inkscape:connector-curvature="0" />
- <path
- style="fill:url(#linearGradient4319);fill-opacity:1;fill-rule:nonzero;stroke:none;display:inline;opacity:1"
- d="M 74.580078 23.386719 L 29.082031 27.345703 C 27.168951 27.515473 25.609469 29.193966 25.605469 31.103516 L 25.611328 65.453125 L 36.058594 56.269531 C 35.937384 55.886351 35.874047 55.482292 35.873047 55.070312 C 35.873027 53.963572 36.31276 52.862779 37.09375 52.011719 C 37.87476 51.160639 38.932659 50.62795 40.037109 50.53125 C 41.884939 50.37086 43.510397 51.449941 44.029297 53.181641 L 49.84375 53.183594 C 50.54598 51.702464 51.989182 50.677429 53.570312 50.537109 C 54.053473 50.496819 54.532718 50.541059 54.986328 50.667969 L 60.583984 43.792969 C 60.277534 43.227429 60.117687 42.583316 60.117188 41.916016 C 60.117518 39.611186 61.983053 37.579323 64.283203 37.376953 C 66.379353 37.195643 68.145363 38.603313 68.408203 40.664062 L 78.03125 42.248047 L 78.03125 45.378906 L 78.041016 26.554688 C 78.044016 24.640387 76.496418 23.231889 74.580078 23.386719 z M 78.03125 45.378906 L 67.669922 43.673828 C 66.888622 44.841238 65.625617 45.603756 64.279297 45.722656 C 63.795657 45.763646 63.317491 45.72027 62.863281 45.59375 L 57.265625 52.46875 C 57.572275 53.03466 57.734105 53.678003 57.734375 54.345703 C 57.733265 56.649533 55.869342 58.680803 53.570312 58.882812 C 51.721263 59.044523 50.093379 57.965092 49.574219 56.232422 L 43.761719 56.230469 C 43.059869 57.710939 41.617399 58.736203 40.037109 58.876953 C 39.298069 58.940803 38.572937 58.805248 37.935547 58.486328 L 25.611328 69.322266 L 25.613281 76.734375 C 25.605281 78.643715 27.159736 80.061651 29.072266 79.894531 L 74.541016 75.910156 C 76.462106 75.740926 78.015531 74.054318 78.019531 72.148438 L 78.03125 45.378906 z "
- transform="translate(-268,635.29076)"
- id="path4633-5" />
- <path
- style="fill:#ffffff;fill-opacity:1;fill-rule:nonzero;stroke:none;display:inline;opacity:0.3"
- d="M 74.580078 23.386719 L 29.082031 27.345703 C 27.168951 27.515473 25.609469 29.193966 25.605469 31.103516 L 25.605469 31.302734 C 25.609469 29.393184 27.168951 27.714692 29.082031 27.544922 L 74.580078 23.585938 C 76.495748 23.431162 78.042931 24.838676 78.041016 26.751953 L 78.041016 26.554688 C 78.044016 24.640387 76.496418 23.231889 74.580078 23.386719 z M 60.125 42.041016 C 60.124551 42.066189 60.117191 42.089993 60.117188 42.115234 C 60.117651 42.734094 60.260989 43.33044 60.525391 43.865234 L 60.583984 43.792969 C 60.296842 43.263061 60.144234 42.663329 60.125 42.041016 z M 67.669922 43.673828 C 66.888622 44.841238 65.625617 45.603756 64.279297 45.722656 C 63.795657 45.763646 63.317491 45.72027 62.863281 45.59375 L 57.265625 52.46875 C 57.287847 52.509761 57.303524 52.553899 57.324219 52.595703 L 62.863281 45.792969 C 63.317491 45.919879 63.795657 45.962875 64.279297 45.921875 C 65.625617 45.802975 66.888622 45.040457 67.669922 43.873047 L 78.03125 45.578125 L 78.03125 45.378906 L 67.669922 43.673828 z M 57.726562 54.419922 C 57.68528 56.692433 55.844165 58.683016 53.570312 58.882812 C 51.721263 59.044523 50.093379 57.965092 49.574219 56.232422 L 43.761719 56.230469 C 43.059869 57.710939 41.617399 58.736203 40.037109 58.876953 C 39.298069 58.940803 38.572937 58.805248 37.935547 58.486328 L 25.611328 69.322266 L 25.611328 69.521484 L 37.935547 58.685547 C 38.572937 59.004467 39.298069 59.140072 40.037109 59.076172 C 41.617399 58.935422 43.059869 57.910157 43.761719 56.429688 L 49.574219 56.431641 C 50.093379 58.164311 51.721262 59.243741 53.570312 59.082031 C 55.869343 58.880021 57.733375 56.848752 57.734375 54.544922 C 57.734358 54.502688 57.727868 54.461948 57.726562 54.419922 z M 35.880859 55.181641 C 35.880238 55.211108 35.873046 55.24005 35.873047 55.269531 C 35.873907 55.623997 35.929222 55.970202 36.019531 56.304688 L 36.058594 56.269531 C 35.948415 55.921224 35.892049 55.554719 35.880859 55.181641 z "
- transform="translate(-268,635.29076)"
- id="path4481" />
- </g>
- <path
- sodipodi:nodetypes="ccccc"
- xml:space="default"
- inkscape:connector-curvature="0"
- id="path4181"
- d="m -99.97999,710.89598 0.0188,-52.54136 -52.43365,4.60327 0,52.52379 z"
- style="color:#000000;fill:#ffff00;fill-opacity:0.47839511;fill-rule:nonzero;stroke:none;stroke-width:1.70000005;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate" />
- <rect
- xml:space="default"
- style="opacity:0.8;color:#000000;fill:#ff00ff;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="rect3908"
- width="69.206657"
- height="69.206657"
- x="279.50037"
- y="648.49109"
- transform="scale(-1,1)" />
- <path
- id="path100"
- d="m -113.71761,672.66709 c -2.30015,0.20237 -4.16481,2.23468 -4.16514,4.53951 5e-4,0.6673 0.16069,1.31071 0.46714,1.87625 0,0 -5.59737,6.87651 -5.59737,6.87651 -0.45361,-0.12691 -0.93271,-0.1712 -1.41587,-0.13091 -1.58113,0.14032 -3.02501,1.16568 -3.72724,2.64681 0,0 -5.81396,-0.002 -5.81396,-0.002 -0.5189,-1.7317 -2.14454,-2.81099 -3.99237,-2.6506 -1.10445,0.0967 -2.16372,0.62919 -2.94473,1.48027 -0.78099,0.85106 -1.21974,1.95097 -1.21972,3.05771 0.001,0.41198 0.0636,0.81614 0.18481,1.19932 0,0 -10.45278,9.18929 -10.45278,9.18929 0,0 0,3.8696 0,3.8696 0,0 12.3297,-10.84125 12.3297,-10.84125 0.63739,0.31892 1.36286,0.45336 2.1019,0.38951 1.58029,-0.14075 3.02321,-1.16576 3.72506,-2.64623 0,0 5.81233,0.002 5.81233,0.002 0.51916,1.73267 2.14635,2.81208 3.9954,2.65037 2.29903,-0.20201 4.16306,-2.23263 4.16417,-4.53646 -2.7e-4,-0.6677 -0.16047,-1.31155 -0.46712,-1.87746 0,0 5.59606,-6.87475 5.59606,-6.87475 0.45421,0.12652 0.93388,0.17026 1.41752,0.12927 1.34632,-0.1189 2.60939,-0.8825 3.39069,-2.04991 0,0 10.35996,1.70595 10.35996,1.70595 0,0 0.001,-3.13044 0.001,-3.13044 0,0 -9.62326,-1.58511 -9.62326,-1.58511 -0.26284,-2.06075 -2.03015,-3.46879 -4.1263,-3.28748 0,0 0,0 0,0"
- style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-indent:0;text-align:start;text-decoration:none;line-height:normal;letter-spacing:normal;word-spacing:normal;text-transform:none;direction:ltr;block-progression:tb;writing-mode:lr-tb;text-anchor:start;baseline-shift:baseline;opacity:0.3;color:#000000;color-interpolation:sRGB;color-interpolation-filters:linearRGB;fill:#000000;fill-opacity:1;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate;clip-rule:nonzero;color-rendering:auto;image-rendering:auto;shape-rendering:auto;text-rendering:auto;font-family:sans-serif;-inkscape-font-specification:sans-serif"
- inkscape:connector-curvature="0" />
- </g>
- <g
- style="display:inline"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- id="layer3"
- inkscape:groupmode="layer" />
- <g
- sodipodi:insensitive="true"
- style="display:none"
- inkscape:label="BADGE"
- id="layer2"
- inkscape:groupmode="layer">
- <g
- clip-path="none"
- id="g4394"
- transform="translate(-340.00001,-581)"
- style="display:inline">
- <g
- id="g855">
- <g
- style="opacity:0.6;filter:url(#filter891)"
- clip-path="url(#clipPath873)"
- id="g870"
- inkscape:groupmode="maskhelper">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path844"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)" />
- </g>
- <g
- id="g862">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4398"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4400"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.25,0,0,1.25,33,-100.45273)" />
- <path
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- inkscape:randomized="0"
- inkscape:rounded="0.1"
- inkscape:flatsided="false"
- sodipodi:arg2="1.6755161"
- sodipodi:arg1="1.0471976"
- sodipodi:r2="4.3458705"
- sodipodi:r1="7.2431178"
- sodipodi:cy="589.50385"
- sodipodi:cx="666.19574"
- sodipodi:sides="5"
- id="path4459"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="star" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/ceilometer/lib/ceilometer_contexts.py b/charms/trusty/ceilometer/lib/ceilometer_contexts.py
deleted file mode 100644
index 72aea39..0000000
--- a/charms/trusty/ceilometer/lib/ceilometer_contexts.py
+++ /dev/null
@@ -1,122 +0,0 @@
-from charmhelpers.core.hookenv import (
- relation_ids,
- relation_get,
- related_units,
- config
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-from charmhelpers.contrib.openstack.context import (
- OSContextGenerator,
- context_complete,
- ApacheSSLContext as SSLContext,
-)
-
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port
-)
-
-CEILOMETER_DB = 'ceilometer'
-
-
-class LoggingConfigContext(OSContextGenerator):
- def __call__(self):
- return {'debug': config('debug'), 'verbose': config('verbose')}
-
-
-class MongoDBContext(OSContextGenerator):
- interfaces = ['mongodb']
-
- def __call__(self):
- mongo_servers = []
- replset = None
- use_replset = os_release('ceilometer-api') >= 'icehouse'
-
- for relid in relation_ids('shared-db'):
- rel_units = related_units(relid)
- use_replset = use_replset and (len(rel_units) > 1)
-
- for unit in rel_units:
- host = relation_get('hostname', unit, relid)
- port = relation_get('port', unit, relid)
-
- conf = {
- "db_host": host,
- "db_port": port,
- "db_name": CEILOMETER_DB
- }
-
- if not context_complete(conf):
- continue
-
- if not use_replset:
- return conf
-
- if replset is None:
- replset = relation_get('replset', unit, relid)
-
- mongo_servers.append('{}:{}'.format(host, port))
-
- if mongo_servers:
- return {
- 'db_mongo_servers': ','.join(mongo_servers),
- 'db_name': CEILOMETER_DB,
- 'db_replset': replset
- }
-
- return {}
-
-
-CEILOMETER_PORT = 8777
-
-
-class CeilometerContext(OSContextGenerator):
- def __call__(self):
- # Lazy-import to avoid a circular dependency in the imports
- from ceilometer_utils import get_shared_secret
-
- ctxt = {
- 'api_workers': config('api-workers'),
- 'port': CEILOMETER_PORT,
- 'metering_secret': get_shared_secret()
- }
- return ctxt
-
-
-class CeilometerServiceContext(OSContextGenerator):
- interfaces = ['ceilometer-service']
-
- def __call__(self):
- for relid in relation_ids('ceilometer-service'):
- for unit in related_units(relid):
- conf = relation_get(unit=unit, rid=relid)
- if context_complete(conf):
- return conf
- return {}
-
-
-class HAProxyContext(OSContextGenerator):
- interfaces = ['ceilometer-haproxy']
-
- def __call__(self):
- '''Extends the main charmhelpers HAProxyContext with a port mapping
- specific to this charm.
- '''
- haproxy_port = CEILOMETER_PORT
- api_port = determine_api_port(CEILOMETER_PORT, singlenode_mode=True)
- apache_port = determine_apache_port(CEILOMETER_PORT,
- singlenode_mode=True)
-
- ctxt = {
- 'service_ports': {'ceilometer_api': [haproxy_port, apache_port]},
- 'port': api_port
- }
- return ctxt
-
-
-class ApacheSSLContext(SSLContext):
-
- external_ports = [CEILOMETER_PORT]
- service_namespace = "ceilometer"
diff --git a/charms/trusty/ceilometer/lib/ceilometer_utils.py b/charms/trusty/ceilometer/lib/ceilometer_utils.py
deleted file mode 100644
index eca9c15..0000000
--- a/charms/trusty/ceilometer/lib/ceilometer_utils.py
+++ /dev/null
@@ -1,391 +0,0 @@
-import os
-import shutil
-import uuid
-
-from collections import OrderedDict
-
-import yaml
-
-from charmhelpers.contrib.openstack import (
- templating,
- context,
-)
-from ceilometer_contexts import (
- ApacheSSLContext,
- LoggingConfigContext,
- MongoDBContext,
- CeilometerContext,
- HAProxyContext,
- CEILOMETER_PORT,
-)
-from charmhelpers.contrib.openstack.utils import (
- get_os_codename_package,
- get_os_codename_install_source,
- configure_installation_source,
- pause_unit,
- resume_unit,
- make_assess_status_func,
-)
-from charmhelpers.core.hookenv import (
- config,
- log,
- related_units,
- relation_get,
- relation_ids,
-)
-from charmhelpers.fetch import apt_update, apt_install, apt_upgrade
-from copy import deepcopy
-
-HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
-CEILOMETER_CONF_DIR = "/etc/ceilometer"
-CEILOMETER_CONF = "%s/ceilometer.conf" % CEILOMETER_CONF_DIR
-CEILOMETER_PIPELINE_CONF = '%s/pipeline.yaml' % CEILOMETER_CONF_DIR
-CEILOMETER_PIPELINE_CONF_ORIG = '%s.orig' % CEILOMETER_PIPELINE_CONF
-HTTPS_APACHE_CONF = "/etc/apache2/sites-available/openstack_https_frontend"
-HTTPS_APACHE_24_CONF = "/etc/apache2/sites-available/" \
- "openstack_https_frontend.conf"
-CLUSTER_RES = 'grp_ceilometer_vips'
-
-CEILOMETER_BASE_SERVICES = [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
-]
-
-ICEHOUSE_SERVICES = [
- 'ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'
-]
-
-MITAKA_SERVICES = [
- 'ceilometer-agent-notification'
-]
-
-CEILOMETER_DB = "ceilometer"
-CEILOMETER_SERVICE = "ceilometer"
-
-CEILOMETER_BASE_PACKAGES = [
- 'haproxy',
- 'apache2',
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'python-pymongo',
-]
-
-ICEHOUSE_PACKAGES = [
- 'ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'
-]
-
-MITAKA_PACKAGES = [
- 'ceilometer-agent-notification'
-]
-
-REQUIRED_INTERFACES = {
- 'database': ['mongodb'],
- 'messaging': ['amqp'],
- 'identity': ['identity-service'],
-}
-
-CEILOMETER_ROLE = "ResellerAdmin"
-SVC = 'ceilometer'
-
-CONFIG_FILES = OrderedDict([
- (CEILOMETER_CONF, {
- 'hook_contexts': [context.IdentityServiceContext(service=SVC,
- service_user=SVC),
- context.AMQPContext(ssl_dir=CEILOMETER_CONF_DIR),
- LoggingConfigContext(),
- MongoDBContext(),
- CeilometerContext(),
- context.SyslogContext(),
- HAProxyContext()],
- 'services': CEILOMETER_BASE_SERVICES
- }),
- (HAPROXY_CONF, {
- 'hook_contexts': [context.HAProxyContext(singlenode_mode=True),
- HAProxyContext()],
- 'services': ['haproxy'],
- }),
- (HTTPS_APACHE_CONF, {
- 'hook_contexts': [ApacheSSLContext()],
- 'services': ['apache2'],
- }),
- (HTTPS_APACHE_24_CONF, {
- 'hook_contexts': [ApacheSSLContext()],
- 'services': ['apache2'],
- })
-])
-
-TEMPLATES = 'templates'
-
-JUJU_HEADER = '# [ WARNING ] config file maintained by Juju, local changes may be overwritten.'
-
-SHARED_SECRET = "/etc/ceilometer/secret.txt"
-
-
-def ordereddict_constructor(loader, node):
- return OrderedDict(loader.construct_pairs(node))
-
-
-def ordereddict_representer(dumper, data):
- return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
-
-
-yaml.add_constructor('tag:yaml.org,2002:map', ordereddict_constructor)
-yaml.add_representer(OrderedDict, ordereddict_representer)
-
-
-def register_configs():
- """
- Register config files with their respective contexts.
- Regstration of some configs may not be required depending on
- existing of certain relations.
- """
- # if called without anything installed (eg during install hook)
- # just default to earliest supported release. configs dont get touched
- # till post-install, anyway.
- release = get_os_codename_package('ceilometer-common', fatal=False) \
- or 'grizzly'
- configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
- openstack_release=release)
-
- for conf in CONFIG_FILES:
- configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
-
- if os.path.exists('/etc/apache2/conf-available'):
- configs.register(HTTPS_APACHE_24_CONF,
- CONFIG_FILES[HTTPS_APACHE_24_CONF]['hook_contexts'])
- else:
- configs.register(HTTPS_APACHE_CONF,
- CONFIG_FILES[HTTPS_APACHE_CONF]['hook_contexts'])
- return configs
-
-
-def restart_map():
- """
- Determine the correct resource map to be passed to
- charmhelpers.core.restart_on_change() based on the services configured.
-
- :returns: dict: A dictionary mapping config file to lists of services
- that should be restarted when file changes.
- """
- _map = {}
- for f, ctxt in CONFIG_FILES.iteritems():
- svcs = []
- for svc in ctxt['services']:
- svcs.append(svc)
- if f == CEILOMETER_CONF:
- for svc in ceilometer_release_services():
- svcs.append(svc)
- if svcs:
- _map[f] = svcs
-
- _map[CEILOMETER_PIPELINE_CONF] = CEILOMETER_BASE_SERVICES
-
- return _map
-
-
-def services():
- """ Returns a list of services associate with this charm """
- _services = []
- for v in restart_map().values():
- _services = _services + v
- return list(set(_services))
-
-
-def determine_ports():
- """Assemble a list of API ports for services the charm is managing
-
- @returns [ports] - list of ports that the charm manages.
- """
- # TODO(ajkavanagh) - determine what other ports the service listens on
- # apart from the main CEILOMETER port
- ports = [CEILOMETER_PORT]
- return ports
-
-
-def get_ceilometer_context():
- """ Retrieve a map of all current relation data for agent configuration """
- ctxt = {}
- for hcontext in CONFIG_FILES[CEILOMETER_CONF]['hook_contexts']:
- ctxt.update(hcontext())
- return ctxt
-
-
-def do_openstack_upgrade(configs):
- """
- Perform an upgrade. Takes care of upgrading packages, rewriting
- configs, database migrations and potentially any other post-upgrade
- actions.
-
- :param configs: The charms main OSConfigRenderer object.
- """
- new_src = config('openstack-origin')
- new_os_rel = get_os_codename_install_source(new_src)
-
- log('Performing OpenStack upgrade to %s.' % (new_os_rel))
-
- configure_installation_source(new_src)
- dpkg_opts = [
- '--option', 'Dpkg::Options::=--force-confnew',
- '--option', 'Dpkg::Options::=--force-confdef',
- ]
- apt_update(fatal=True)
- apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
- apt_install(packages=get_packages(),
- options=dpkg_opts,
- fatal=True)
-
- # set CONFIGS to load templates from new release
- configs.set_release(openstack_release=new_os_rel)
-
-
-def ceilometer_release_services():
- codename = get_os_codename_install_source(config('openstack-origin'))
- if codename >= 'mitaka':
- return MITAKA_SERVICES
- elif codename >= 'icehouse':
- return ICEHOUSE_SERVICES
- else:
- return []
-
-
-def ceilometer_release_packages():
- codename = get_os_codename_install_source(config('openstack-origin'))
- if codename >= 'mitaka':
- return MITAKA_PACKAGES
- elif codename >= 'icehouse':
- return ICEHOUSE_PACKAGES
- else:
- return []
-
-
-def get_packages():
- packages = (deepcopy(CEILOMETER_BASE_PACKAGES) +
- ceilometer_release_packages())
- return packages
-
-
-def get_shared_secret():
- """
- Returns the current shared secret for the ceilometer node. If the shared
- secret does not exist, this method will generate one.
- """
- secret = None
- if not os.path.exists(SHARED_SECRET):
- secret = str(uuid.uuid4())
- set_shared_secret(secret)
- else:
- with open(SHARED_SECRET, 'r') as secret_file:
- secret = secret_file.read().strip()
- return secret
-
-
-def set_shared_secret(secret):
- """
- Sets the shared secret which is used to sign ceilometer messages.
-
- :param secret: the secret to set
- """
- with open(SHARED_SECRET, 'w') as secret_file:
- secret_file.write(secret)
-
-
-def assess_status(configs):
- """Assess status of current unit
-
- Decides what the state of the unit should be based on the current
- configuration.
-
- SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
- status of the unit.
- Also calls status_set(...) directly if paused state isn't complete.
-
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- assess_status_func(configs)()
-
-
-def assess_status_func(configs):
- """Helper function to create the function that will assess_status() for
- the unit.
- Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to
- create the appropriate status function and then returns it.
- Used directly by assess_status() and also for pausing and resuming
- the unit.
-
- @param configs: a templating.OSConfigRenderer() object
- @return f() -> None : a function that assesses the unit's workload status
- """
- return make_assess_status_func(
- configs, REQUIRED_INTERFACES,
- services=services(), ports=determine_ports())
-
-
-def pause_unit_helper(configs):
- """Helper function to pause a unit, and then call assess_status(...) in
- effect, so that the status is correctly updated.
- Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work.
-
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- _pause_resume_helper(pause_unit, configs)
-
-
-def resume_unit_helper(configs):
- """Helper function to resume a unit, and then call assess_status(...) in
- effect, so that the status is correctly updated.
- Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work.
-
- @param configs: a templating.OSConfigRenderer() object
- @returns None - this function is executed for its side-effect
- """
- _pause_resume_helper(resume_unit, configs)
-
-
-def _pause_resume_helper(f, configs):
- """Helper function that uses the make_assess_status_func(...) from
- charmhelpers.contrib.openstack.utils to create an assess_status(...)
- function that can be used with the pause/resume of the unit
-
- @param f: the function to be used with the assess_status(...) function
- @returns None - this function is executed for its side-effect
- """
- # TODO(ajkavanagh) - ports= has been left off because of the race hazard
- # that exists due to service_start()
- f(assess_status_func(configs),
- services=services(),
- ports=determine_ports())
-
-
-def configure_pipeline():
- if not os.path.exists(CEILOMETER_PIPELINE_CONF_ORIG):
- shutil.copy(CEILOMETER_PIPELINE_CONF,
- CEILOMETER_PIPELINE_CONF_ORIG)
- with open(CEILOMETER_PIPELINE_CONF_ORIG) as f:
- conf = yaml.load(f)
-
- sources = conf['sources']
- sinks = conf['sinks']
- for rid in relation_ids('ceilometer-plugin'):
- for unit in related_units(rid):
- srcs = relation_get('meter-sources', unit, rid)
- if srcs:
- srcs = yaml.load(srcs)
- sources.extend(srcs)
- sks = relation_get('meter-sinks', unit, rid)
- if sks:
- sks = yaml.load(sks)
- sinks.extend(sks)
-
- log('Writing config %s' % CEILOMETER_PIPELINE_CONF)
- with open(CEILOMETER_PIPELINE_CONF, 'w') as f:
- f.write(JUJU_HEADER)
- f.write('\n')
- yaml.dump(conf, f, default_flow_style=False, explicit_start=True)
diff --git a/charms/trusty/ceilometer/metadata.yaml b/charms/trusty/ceilometer/metadata.yaml
deleted file mode 100644
index 6b594a2..0000000
--- a/charms/trusty/ceilometer/metadata.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: ceilometer
-summary: OpenStack Telemetry
-maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
-description: |
- Ceilometer project aims to become the infrastructure to collect measurements
- within OpenStack so that no two agents would need to be written to collect
- the same data. It's primary targets are monitoring and metering, but the
- framework should be easily expandable to collect for other needs. To that
- effect, Ceilometer should be able to share collected data with a variety
- of consumers.
-tags:
- - openstack
- - telemetry
- - misc
-extra-bindings:
- public:
- admin:
- internal:
-provides:
- nrpe-external-master:
- interface: nrpe-external-master
- scope: container
- ceilometer-service:
- interface: ceilometer
-requires:
- shared-db:
- interface: mongodb
- amqp:
- interface: rabbitmq
- identity-service:
- interface: keystone
- identity-notifications:
- interface: keystone-notifications
- ha:
- interface: hacluster
- scope: container
- ceilometer-plugin:
- interface: ceilometer-plugin
- scope: container
-peers:
- cluster:
- interface: ceilometer-ha
diff --git a/charms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central b/charms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central
deleted file mode 100755
index 9c460a3..0000000
--- a/charms/trusty/ceilometer/ocf/openstack/ceilometer-agent-central
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/bin/sh
-#
-#
-# OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-#
-# Description: Manages an OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) process as an HA resource
-#
-# Authors: Emilien Macchi
-# Mainly inspired by the Nova Scheduler resource agent written by Sebastien Han
-#
-# Support: openstack@lists.launchpad.net
-# License: Apache Software License (ASL) 2.0
-#
-#
-# See usage() function below for more details ...
-#
-# OCF instance parameters:
-# OCF_RESKEY_binary
-# OCF_RESKEY_config
-# OCF_RESKEY_user
-# OCF_RESKEY_pid
-# OCF_RESKEY_monitor_binary
-# OCF_RESKEY_amqp_server_port
-# OCF_RESKEY_additional_parameters
-#######################################################################
-# Initialization:
-
-: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
-. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
-
-#######################################################################
-
-# Fill in some defaults if no values are specified
-
-OCF_RESKEY_binary_default="ceilometer-agent-central"
-OCF_RESKEY_config_default="/etc/ceilometer/ceilometer.conf"
-OCF_RESKEY_user_default="ceilometer"
-OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
-OCF_RESKEY_amqp_server_port_default="5672"
-
-: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
-: ${OCF_RESKEY_config=${OCF_RESKEY_config_default}}
-: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
-: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
-: ${OCF_RESKEY_amqp_server_port=${OCF_RESKEY_amqp_server_port_default}}
-
-#######################################################################
-
-usage() {
- cat <<UEND
- usage: $0 (start|stop|validate-all|meta-data|status|monitor)
-
- $0 manages an OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) process as an HA resource
-
- The 'start' operation starts the scheduler service.
- The 'stop' operation stops the scheduler service.
- The 'validate-all' operation reports whether the parameters are valid
- The 'meta-data' operation reports this RA's meta-data information
- The 'status' operation reports whether the scheduler service is running
- The 'monitor' operation reports whether the scheduler service seems to be working
-
-UEND
-}
-
-meta_data() {
- cat <<END
-<?xml version="1.0"?>
-<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
-<resource-agent name="ceilometer-agent-central">
-<version>1.0</version>
-
-<longdesc lang="en">
-Resource agent for the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-May manage a ceilometer-agent-central instance or a clone set that
-creates a distributed ceilometer-agent-central cluster.
-</longdesc>
-<shortdesc lang="en">Manages the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)</shortdesc>
-<parameters>
-
-<parameter name="binary" unique="0" required="0">
-<longdesc lang="en">
-Location of the OpenStack Ceilometer Central Agent server binary (ceilometer-agent-central)
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent server binary (ceilometer-agent-central)</shortdesc>
-<content type="string" default="${OCF_RESKEY_binary_default}" />
-</parameter>
-
-<parameter name="config" unique="0" required="0">
-<longdesc lang="en">
-Location of the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) configuration file
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent (ceilometer-agent-central registry) config file</shortdesc>
-<content type="string" default="${OCF_RESKEY_config_default}" />
-</parameter>
-
-<parameter name="user" unique="0" required="0">
-<longdesc lang="en">
-User running OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) user</shortdesc>
-<content type="string" default="${OCF_RESKEY_user_default}" />
-</parameter>
-
-<parameter name="pid" unique="0" required="0">
-<longdesc lang="en">
-The pid file to use for this OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) instance
-</longdesc>
-<shortdesc lang="en">OpenStack Ceilometer Central Agent Service (ceilometer-agent-central) pid file</shortdesc>
-<content type="string" default="${OCF_RESKEY_pid_default}" />
-</parameter>
-
-<parameter name="amqp_server_port" unique="0" required="0">
-<longdesc lang="en">
-The listening port number of the AMQP server. Use for monitoring purposes
-</longdesc>
-<shortdesc lang="en">AMQP listening port</shortdesc>
-<content type="integer" default="${OCF_RESKEY_amqp_server_port_default}" />
-</parameter>
-
-
-<parameter name="additional_parameters" unique="0" required="0">
-<longdesc lang="en">
-Additional parameters to pass on to the OpenStack Ceilometer Central Agent Service (ceilometer-agent-central)
-</longdesc>
-<shortdesc lang="en">Additional parameters for ceilometer-agent-central</shortdesc>
-<content type="string" />
-</parameter>
-
-</parameters>
-
-<actions>
-<action name="start" timeout="20" />
-<action name="stop" timeout="20" />
-<action name="status" timeout="20" />
-<action name="monitor" timeout="30" interval="20" />
-<action name="validate-all" timeout="5" />
-<action name="meta-data" timeout="5" />
-</actions>
-</resource-agent>
-END
-}
-
-#######################################################################
-# Functions invoked by resource manager actions
-
-ceilometer_agent_central_check_port() {
-# This function has been taken from the squid RA and improved a bit
-# The length of the integer must be 4
-# Examples of valid port: "1080", "0080"
-# Examples of invalid port: "1080bad", "0", "0000", ""
-
- local int
- local cnt
-
- int="$1"
- cnt=${#int}
- echo $int |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*'
-
- if [ $? -ne 0 ] || [ $cnt -ne 4 ]; then
- ocf_log err "Invalid port number: $1"
- exit $OCF_ERR_CONFIGURED
- fi
-}
-
-ceilometer_agent_central_validate() {
- local rc
-
- check_binary $OCF_RESKEY_binary
- check_binary netstat
- ceilometer_agent_central_check_port $OCF_RESKEY_amqp_server_port
-
- # A config file on shared storage that is not available
- # during probes is OK.
- if [ ! -f $OCF_RESKEY_config ]; then
- if ! ocf_is_probe; then
- ocf_log err "Config $OCF_RESKEY_config doesn't exist"
- return $OCF_ERR_INSTALLED
- fi
- ocf_log_warn "Config $OCF_RESKEY_config not available during a probe"
- fi
-
- getent passwd $OCF_RESKEY_user >/dev/null 2>&1
- rc=$?
- if [ $rc -ne 0 ]; then
- ocf_log err "User $OCF_RESKEY_user doesn't exist"
- return $OCF_ERR_INSTALLED
- fi
-
- true
-}
-
-ceilometer_agent_central_status() {
- local pid
- local rc
-
- if [ ! -f $OCF_RESKEY_pid ]; then
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) is not running"
- return $OCF_NOT_RUNNING
- else
- pid=`cat $OCF_RESKEY_pid`
- fi
-
- ocf_run -warn kill -s 0 $pid
- rc=$?
- if [ $rc -eq 0 ]; then
- return $OCF_SUCCESS
- else
- ocf_log info "Old PID file found, but OpenStack Ceilometer Central Agent (ceilometer-agent-central) is not running"
- return $OCF_NOT_RUNNING
- fi
-}
-
-ceilometer_agent_central_monitor() {
- local rc
- local pid
- local scheduler_amqp_check
-
- ceilometer_agent_central_status
- rc=$?
-
- # If status returned anything but success, return that immediately
- if [ $rc -ne $OCF_SUCCESS ]; then
- return $rc
- fi
-
- # Check the connections according to the PID.
- # We are sure to hit the scheduler process and not other Cinder process with the same connection behavior (for example cinder-api)
- pid=`cat $OCF_RESKEY_pid`
- scheduler_amqp_check=`netstat -punt | grep -s "$OCF_RESKEY_amqp_server_port" | grep -s "$pid" | grep -qs "ESTABLISHED"`
- rc=$?
- if [ $rc -ne 0 ]; then
- ocf_log err "Central Agent is not connected to the AMQP server : $rc"
- return $OCF_NOT_RUNNING
- fi
-
- ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-central) monitor succeeded"
- return $OCF_SUCCESS
-}
-
-ceilometer_agent_central_start() {
- local rc
-
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -eq $OCF_SUCCESS ]; then
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) already running"
- return $OCF_SUCCESS
- fi
-
- # run the actual ceilometer-agent-central daemon. Don't use ocf_run as we're sending the tool's output
- # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
- su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
- $OCF_RESKEY_additional_parameters"' >> /dev/null 2>&1 & echo $!' > $OCF_RESKEY_pid
-
- # Spin waiting for the server to come up.
- while true; do
- ceilometer_agent_central_monitor
- rc=$?
- [ $rc -eq $OCF_SUCCESS ] && break
- if [ $rc -ne $OCF_NOT_RUNNING ]; then
- ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-central) start failed"
- exit $OCF_ERR_GENERIC
- fi
- sleep 1
- done
-
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) started"
- return $OCF_SUCCESS
-}
-
-ceilometer_agent_central_stop() {
- local rc
- local pid
-
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -eq $OCF_NOT_RUNNING ]; then
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) already stopped"
- return $OCF_SUCCESS
- fi
-
- # Try SIGTERM
- pid=`cat $OCF_RESKEY_pid`
- ocf_run kill -s TERM $pid
- rc=$?
- if [ $rc -ne 0 ]; then
- ocf_log err "OpenStack Ceilometer Central Agent (ceilometer-agent-central) couldn't be stopped"
- exit $OCF_ERR_GENERIC
- fi
-
- # stop waiting
- shutdown_timeout=15
- if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then
- shutdown_timeout=$((($OCF_RESKEY_CRM_meta_timeout/1000)-5))
- fi
- count=0
- while [ $count -lt $shutdown_timeout ]; do
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -eq $OCF_NOT_RUNNING ]; then
- break
- fi
- count=`expr $count + 1`
- sleep 1
- ocf_log debug "OpenStack Ceilometer Central Agent (ceilometer-agent-central) still hasn't stopped yet. Waiting ..."
- done
-
- ceilometer_agent_central_status
- rc=$?
- if [ $rc -ne $OCF_NOT_RUNNING ]; then
- # SIGTERM didn't help either, try SIGKILL
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) failed to stop after ${shutdown_timeout}s \
- using SIGTERM. Trying SIGKILL ..."
- ocf_run kill -s KILL $pid
- fi
-
- ocf_log info "OpenStack Ceilometer Central Agent (ceilometer-agent-central) stopped"
-
- rm -f $OCF_RESKEY_pid
-
- return $OCF_SUCCESS
-}
-
-#######################################################################
-
-case "$1" in
- meta-data) meta_data
- exit $OCF_SUCCESS;;
- usage|help) usage
- exit $OCF_SUCCESS;;
-esac
-
-# Anything except meta-data and help must pass validation
-ceilometer_agent_central_validate || exit $?
-
-# What kind of method was invoked?
-case "$1" in
- start) ceilometer_agent_central_start;;
- stop) ceilometer_agent_central_stop;;
- status) ceilometer_agent_central_status;;
- monitor) ceilometer_agent_central_monitor;;
- validate-all) ;;
- *) usage
- exit $OCF_ERR_UNIMPLEMENTED;;
-esac
diff --git a/charms/trusty/ceilometer/requirements.txt b/charms/trusty/ceilometer/requirements.txt
deleted file mode 100644
index 426002d..0000000
--- a/charms/trusty/ceilometer/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-PyYAML>=3.1.0
-simplejson>=2.2.0
-netifaces>=0.10.4
-netaddr>=0.7.12,!=0.7.16
-Jinja2>=2.6 # BSD License (3 clause)
-six>=1.9.0
-dnspython>=1.12.0
-psutil>=1.1.1,<2.0.0
diff --git a/charms/trusty/ceilometer/revision b/charms/trusty/ceilometer/revision
deleted file mode 100644
index c739b42..0000000
--- a/charms/trusty/ceilometer/revision
+++ /dev/null
@@ -1 +0,0 @@
-44
diff --git a/charms/trusty/ceilometer/setup.cfg b/charms/trusty/ceilometer/setup.cfg
deleted file mode 100644
index 3f7bd91..0000000
--- a/charms/trusty/ceilometer/setup.cfg
+++ /dev/null
@@ -1,6 +0,0 @@
-[nosetests]
-verbosity=2
-with-coverage=1
-cover-erase=1
-cover-package=hooks
-
diff --git a/charms/trusty/ceilometer/templates/icehouse/ceilometer.conf b/charms/trusty/ceilometer/templates/icehouse/ceilometer.conf
deleted file mode 100644
index 4b6b6d1..0000000
--- a/charms/trusty/ceilometer/templates/icehouse/ceilometer.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# icehouse
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-use_syslog = {{ use_syslog }}
-
-{% include "parts/rabbitmq" -%}
-
-[api]
-port = {{ port }}
-
-[service_credentials]
-os_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-
-[database]
-{% if db_replset: -%}
-connection = mongodb://{{ db_mongo_servers }}/{{ db_name }}?readPreference=primaryPreferred&replicaSet={{ db_replset }}
-mongodb_replica_set = {{ db_replset }}
-{% else -%}
-connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
-{% endif %}
-
-[publisher_rpc]
-metering_secret = {{ metering_secret }}
-
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = {{ auth_protocol }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
diff --git a/charms/trusty/ceilometer/templates/kilo/ceilometer.conf b/charms/trusty/ceilometer/templates/kilo/ceilometer.conf
deleted file mode 100644
index da44948..0000000
--- a/charms/trusty/ceilometer/templates/kilo/ceilometer.conf
+++ /dev/null
@@ -1,43 +0,0 @@
-# kilo
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-use_syslog = {{ use_syslog }}
-api_workers = {{ api_workers }}
-
-{% include "parts/rabbitmq" -%}
-
-[api]
-port = {{ port }}
-
-[service_credentials]
-os_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-
-[database]
-{% if db_replset: -%}
-connection = mongodb://{{ db_mongo_servers }}/{{ db_name }}?readPreference=primaryPreferred&replicaSet={{ db_replset }}
-mongodb_replica_set = {{ db_replset }}
-{% else -%}
-connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
-{% endif %}
-
-[publisher_rpc]
-metering_secret = {{ metering_secret }}
-
-[keystone_authtoken]
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = {{ auth_protocol }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
diff --git a/charms/trusty/ceilometer/templates/mitaka/ceilometer.conf b/charms/trusty/ceilometer/templates/mitaka/ceilometer.conf
deleted file mode 100644
index d026133..0000000
--- a/charms/trusty/ceilometer/templates/mitaka/ceilometer.conf
+++ /dev/null
@@ -1,42 +0,0 @@
-# mitaka
-###############################################################################
-# [ WARNING ]
-# ceilometer configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[DEFAULT]
-debug = {{ debug }}
-verbose = {{ verbose }}
-use_syslog = {{ use_syslog }}
-
-[api]
-port = {{ port }}
-workers = {{ api_workers }}
-
-[service_credentials]
-os_auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
-os_tenant_name = {{ admin_tenant_name }}
-os_username = {{ admin_user }}
-os_password = {{ admin_password }}
-
-[database]
-{% if db_replset: -%}
-connection = mongodb://{{ db_mongo_servers }}/{{ db_name }}?readPreference=primaryPreferred&replicaSet={{ db_replset }}
-mongodb_replica_set = {{ db_replset }}
-{% else -%}
-connection = mongodb://{{ db_host }}:{{ db_port }}/{{ db_name }}
-{% endif %}
-
-[publisher]
-telemetry_secret = {{ metering_secret }}
-
-[keystone_authtoken]
-auth_type = password
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
-auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/
-project_name = {{ admin_tenant_name }}
-username = {{ admin_user }}
-password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-
-{% include "section-rabbitmq-oslo" %}
diff --git a/charms/trusty/ceilometer/templates/parts/rabbitmq b/charms/trusty/ceilometer/templates/parts/rabbitmq
deleted file mode 100644
index bbd0371..0000000
--- a/charms/trusty/ceilometer/templates/parts/rabbitmq
+++ /dev/null
@@ -1,21 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%} \ No newline at end of file
diff --git a/charms/trusty/ceilometer/test-requirements.txt b/charms/trusty/ceilometer/test-requirements.txt
deleted file mode 100644
index 4faf254..0000000
--- a/charms/trusty/ceilometer/test-requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-coverage>=3.6
-mock>=1.2
-flake8>=2.2.4,<=2.4.1
-os-testr>=0.4.1
-charm-tools>=2.0.0
-requests==2.6.0
diff --git a/charms/trusty/ceilometer/tests/014-basic-precise-icehouse b/charms/trusty/ceilometer/tests/014-basic-precise-icehouse
deleted file mode 100755
index 0f5c590..0000000
--- a/charms/trusty/ceilometer/tests/014-basic-precise-icehouse
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on precise-icehouse."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='precise',
- openstack='cloud:precise-icehouse',
- source='cloud:precise-updates/icehouse')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse b/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse
deleted file mode 100755
index 8530390..0000000
--- a/charms/trusty/ceilometer/tests/015-basic-trusty-icehouse
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-icehouse."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/016-basic-trusty-juno b/charms/trusty/ceilometer/tests/016-basic-trusty-juno
deleted file mode 100755
index f1ca57d..0000000
--- a/charms/trusty/ceilometer/tests/016-basic-trusty-juno
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-juno."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-juno',
- source='cloud:trusty-updates/juno')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/017-basic-trusty-kilo b/charms/trusty/ceilometer/tests/017-basic-trusty-kilo
deleted file mode 100755
index cc89564..0000000
--- a/charms/trusty/ceilometer/tests/017-basic-trusty-kilo
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-kilo."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-kilo',
- source='cloud:trusty-updates/kilo')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/018-basic-trusty-liberty b/charms/trusty/ceilometer/tests/018-basic-trusty-liberty
deleted file mode 100755
index 18b9500..0000000
--- a/charms/trusty/ceilometer/tests/018-basic-trusty-liberty
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-liberty."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-liberty',
- source='cloud:trusty-updates/liberty')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka b/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka
deleted file mode 100755
index 06c0849..0000000
--- a/charms/trusty/ceilometer/tests/019-basic-trusty-mitaka
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on trusty-mitaka."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='trusty',
- openstack='cloud:trusty-mitaka',
- source='cloud:trusty-updates/mitaka')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/020-basic-wily-liberty b/charms/trusty/ceilometer/tests/020-basic-wily-liberty
deleted file mode 100755
index 7b5908f..0000000
--- a/charms/trusty/ceilometer/tests/020-basic-wily-liberty
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on wily-liberty."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='wily')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka b/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka
deleted file mode 100755
index 1706607..0000000
--- a/charms/trusty/ceilometer/tests/021-basic-xenial-mitaka
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/python
-
-"""Amulet tests on a basic ceilometer deployment on xenial-mitaka."""
-
-from basic_deployment import CeilometerBasicDeployment
-
-if __name__ == '__main__':
- deployment = CeilometerBasicDeployment(series='xenial')
- deployment.run_tests()
diff --git a/charms/trusty/ceilometer/tests/README b/charms/trusty/ceilometer/tests/README
deleted file mode 100644
index 79c5b06..0000000
--- a/charms/trusty/ceilometer/tests/README
+++ /dev/null
@@ -1,113 +0,0 @@
-This directory provides Amulet tests to verify basic deployment functionality
-from the perspective of this charm, its requirements and its features, as
-exercised in a subset of the full OpenStack deployment test bundle topology.
-
-Reference: lp:openstack-charm-testing for full test bundles.
-
-A single topology and configuration is defined and deployed, once for each of
-the defined Ubuntu:OpenStack release combos. The ongoing goal is for this
-charm to always possess tests and combo definitions for all currently-supported
-release combinations of U:OS.
-
-test_* methods are called in lexical sort order, as with most runners. However,
-each individual test method should be idempotent and expected to pass regardless
-of run order or Ubuntu:OpenStack combo. When writing or modifying tests,
-ensure that every individual test is not dependent on another test_ method.
-
-Test naming convention, purely for code organization purposes:
- 1xx service and endpoint checks
- 2xx relation checks
- 3xx config checks
- 4xx functional checks
- 9xx restarts, config changes, actions and other final checks
-
-In order to run tests, charm-tools and juju must be installed:
- sudo add-apt-repository ppa:juju/stable
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer amulet
-
-Alternatively, tests may be exercised with proposed or development versions
-of juju and related tools:
-
- # juju proposed version
- sudo add-apt-repository ppa:juju/proposed
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer
-
- # juju development version
- sudo add-apt-repository ppa:juju/devel
- sudo apt-get update
- sudo apt-get install charm-tools juju juju-deployer
-
-Some tests may need to download files. If a web proxy server is required in
-the environment, the AMULET_HTTP_PROXY environment variable must be set and
-passed into the juju test command. This is unrelated to juju's http proxy
-settings or behavior.
-
-The following examples demonstrate different ways that tests can be executed.
-All examples are run from the charm's root directory.
-
- * To run all +x tests in the tests directory:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- make functional_test
-
- * To run the tests against a specific release combo as defined in tests/:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- juju test -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
-
- * To run tests and keep the juju environment deployed after a failure:
-
- bzr branch lp:charms/trusty/foo
- cd foo
- juju test --set-e -v -p AMULET_HTTP_PROXY 015-basic-trusty-icehouse
-
- * To re-run a test module against an already deployed environment (one
- that was deployed by a previous call to 'juju test --set-e'):
-
- ./tests/015-basic-trusty-icehouse
-
- * Even with --set-e, `juju test` will tear down the deployment when all
- tests pass. The following work flow may be more effective when
- iterating on test writing.
-
- bzr branch lp:charms/trusty/foo
- cd foo
- ./tests/setup/00-setup
- juju bootstrap
- ./tests/015-basic-trusty-icehouse
- # make some changes, run tests again
- ./tests/015-basic-trusty-icehouse
- # make some changes, run tests again
- ./tests/015-basic-trusty-icehouse
-
- * There may be test definitions in the tests/ dir which are not set +x
- executable. This is generally true for deprecated releases, or for
- upcoming releases which are not yet validated and enabled. To enable
- and run these tests:
- bzr branch lp:charms/trusty/foo
- cd foo
- ls tests
- chmod +x tests/017-basic-trusty-kilo
- ./tests/setup/00-setup
- juju bootstrap
- ./tests/017-basic-trusty-kilo
-
-
-Additional notes:
-
- * Use DEBUG to turn on debug logging, use ERROR otherwise.
- u = OpenStackAmuletUtils(ERROR)
- u = OpenStackAmuletUtils(DEBUG)
-
- * To interact with the deployed environment:
- export OS_USERNAME=admin
- export OS_PASSWORD=openstack
- export OS_TENANT_NAME=admin
- export OS_REGION_NAME=RegionOne
- export OS_AUTH_URL=${OS_AUTH_PROTOCOL:-http}://`juju-deployer -e trusty -f keystone`:5000/v2.0
- keystone user-list
- glance image-list
diff --git a/charms/trusty/ceilometer/tests/basic_deployment.py b/charms/trusty/ceilometer/tests/basic_deployment.py
deleted file mode 100644
index 9769759..0000000
--- a/charms/trusty/ceilometer/tests/basic_deployment.py
+++ /dev/null
@@ -1,664 +0,0 @@
-import subprocess
-import amulet
-import json
-import time
-import ceilometerclient.v2.client as ceilo_client
-
-from charmhelpers.contrib.openstack.amulet.deployment import (
- OpenStackAmuletDeployment
-)
-
-from charmhelpers.contrib.openstack.amulet.utils import (
- OpenStackAmuletUtils,
- DEBUG,
- # ERROR
-)
-
-# Use DEBUG to turn on debug logging
-u = OpenStackAmuletUtils(DEBUG)
-
-
-class CeilometerBasicDeployment(OpenStackAmuletDeployment):
- """Amulet tests on a basic ceilometer deployment."""
-
- def __init__(self, series, openstack=None, source=None, stable=True):
- """Deploy the entire test environment."""
- super(CeilometerBasicDeployment, self).__init__(series, openstack,
- source, stable)
- self._add_services()
- self._add_relations()
- self._configure_services()
- self._deploy()
-
- u.log.info('Waiting on extended status checks...')
- exclude_services = ['mysql', 'mongodb']
- self._auto_wait_for_status(exclude_services=exclude_services)
-
- self._initialize_tests()
-
- def _add_services(self):
- """Add services
-
- Add the services that we're testing, where ceilometer is local,
- and the rest of the service are from lp branches that are
- compatible with the local charm (e.g. stable or next).
- """
- this_service = {'name': 'ceilometer'}
- other_services = [{'name': 'mysql'},
- {'name': 'rabbitmq-server'},
- {'name': 'keystone'},
- {'name': 'mongodb'},
- {'name': 'glance'}, # to satisfy workload status
- {'name': 'ceilometer-agent'},
- {'name': 'nova-compute'}]
- super(CeilometerBasicDeployment, self)._add_services(this_service,
- other_services)
-
- def _add_relations(self):
- """Add all of the relations for the services."""
- relations = {
- 'ceilometer:shared-db': 'mongodb:database',
- 'ceilometer:amqp': 'rabbitmq-server:amqp',
- 'ceilometer:identity-service': 'keystone:identity-service',
- 'ceilometer:identity-notifications': 'keystone:'
- 'identity-notifications',
- 'keystone:shared-db': 'mysql:shared-db',
- 'ceilometer:ceilometer-service': 'ceilometer-agent:'
- 'ceilometer-service',
- 'nova-compute:nova-ceilometer': 'ceilometer-agent:nova-ceilometer',
- 'nova-compute:shared-db': 'mysql:shared-db',
- 'nova-compute:amqp': 'rabbitmq-server:amqp',
- 'glance:identity-service': 'keystone:identity-service',
- 'glance:shared-db': 'mysql:shared-db',
- 'glance:amqp': 'rabbitmq-server:amqp',
- 'nova-compute:image-service': 'glance:image-service'
- }
- super(CeilometerBasicDeployment, self)._add_relations(relations)
-
- def _configure_services(self):
- """Configure all of the services."""
- keystone_config = {'admin-password': 'openstack',
- 'admin-token': 'ubuntutesting'}
- configs = {'keystone': keystone_config}
- super(CeilometerBasicDeployment, self)._configure_services(configs)
-
- def _get_token(self):
- return self.keystone.service_catalog.catalog['token']['id']
-
- def _initialize_tests(self):
- """Perform final initialization before tests get run."""
- # Access the sentries for inspecting service units
- self.ceil_sentry = self.d.sentry.unit['ceilometer/0']
- self.ceil_agent_sentry = self.d.sentry.unit['ceilometer-agent/0']
- self.mysql_sentry = self.d.sentry.unit['mysql/0']
- self.keystone_sentry = self.d.sentry.unit['keystone/0']
- self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
- self.mongodb_sentry = self.d.sentry.unit['mongodb/0']
- self.nova_sentry = self.d.sentry.unit['nova-compute/0']
- u.log.debug('openstack release val: {}'.format(
- self._get_openstack_release()))
- u.log.debug('openstack release str: {}'.format(
- self._get_openstack_release_string()))
-
- # Authenticate admin with keystone endpoint
- self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
- user='admin',
- password='openstack',
- tenant='admin')
-
- # Authenticate admin with ceilometer endpoint
- ep = self.keystone.service_catalog.url_for(service_type='metering',
- endpoint_type='publicURL')
- os_token = self.keystone.auth_token
- self.log.debug('Instantiating ceilometer client...')
- self.ceil = ceilo_client.Client(endpoint=ep, token=os_token)
-
- def _run_action(self, unit_id, action, *args):
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- command.extend(args)
- print("Running command: %s\n" % " ".join(command))
- output = subprocess.check_output(command)
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- action_id = data[u'Action queued with id']
- return action_id
-
- def _wait_on_action(self, action_id):
- command = ["juju", "action", "fetch", "--format=json", action_id]
- while True:
- try:
- output = subprocess.check_output(command)
- except Exception as e:
- print(e)
- return False
- output_json = output.decode(encoding="UTF-8")
- data = json.loads(output_json)
- if data[u"status"] == "completed":
- return True
- elif data[u"status"] == "failed":
- return False
- time.sleep(2)
-
- def test_100_services(self):
- """Verify the expected services are running on the corresponding
- service units."""
- u.log.debug('Checking system services on units...')
-
- ceilometer_svcs = [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-agent-notification',
- ]
-
- if self._get_openstack_release() < self.trusty_mitaka:
- ceilometer_svcs.append('ceilometer-alarm-evaluator')
- ceilometer_svcs.append('ceilometer-alarm-notifier')
-
- service_names = {
- self.ceil_sentry: ceilometer_svcs,
- }
-
- ret = u.validate_services_by_name(service_names)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_110_service_catalog(self):
- """Verify that the service catalog endpoint data is valid."""
- u.log.debug('Checking keystone service catalog data...')
- endpoint_check = {
- 'adminURL': u.valid_url,
- 'id': u.not_null,
- 'region': 'RegionOne',
- 'publicURL': u.valid_url,
- 'internalURL': u.valid_url
- }
- expected = {
- 'metering': [endpoint_check],
- 'identity': [endpoint_check]
- }
- actual = self.keystone.service_catalog.get_endpoints()
-
- ret = u.validate_svc_catalog_endpoint_data(expected, actual)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_112_keystone_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking keystone api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- internal_port = public_port = '5000'
- admin_port = '35357'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Keystone endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_114_ceilometer_api_endpoint(self):
- """Verify the ceilometer api endpoint data."""
- u.log.debug('Checking ceilometer api endpoint data...')
- endpoints = self.keystone.endpoints.list()
- u.log.debug(endpoints)
- admin_port = internal_port = public_port = '8777'
- expected = {'id': u.not_null,
- 'region': 'RegionOne',
- 'adminurl': u.valid_url,
- 'internalurl': u.valid_url,
- 'publicurl': u.valid_url,
- 'service_id': u.not_null}
-
- ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
- public_port, expected)
- if ret:
- message = 'Ceilometer endpoint: {}'.format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_200_ceilometer_identity_relation(self):
- """Verify the ceilometer to keystone identity-service relation data"""
- u.log.debug('Checking ceilometer to keystone identity-service '
- 'relation data...')
- unit = self.ceil_sentry
- relation = ['identity-service', 'keystone:identity-service']
- ceil_ip = unit.relation('identity-service',
- 'keystone:identity-service')['private-address']
- ceil_endpoint = "http://%s:8777" % (ceil_ip)
-
- expected = {
- 'admin_url': ceil_endpoint,
- 'internal_url': ceil_endpoint,
- 'private-address': ceil_ip,
- 'public_url': ceil_endpoint,
- 'region': 'RegionOne',
- 'requested_roles': 'ResellerAdmin',
- 'service': 'ceilometer',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_201_keystone_ceilometer_identity_relation(self):
- """Verify the keystone to ceilometer identity-service relation data"""
- u.log.debug('Checking keystone:ceilometer identity relation data...')
- unit = self.keystone_sentry
- relation = ['identity-service', 'ceilometer:identity-service']
- id_relation = unit.relation('identity-service',
- 'ceilometer:identity-service')
- id_ip = id_relation['private-address']
- expected = {
- 'admin_token': 'ubuntutesting',
- 'auth_host': id_ip,
- 'auth_port': "35357",
- 'auth_protocol': 'http',
- 'private-address': id_ip,
- 'service_host': id_ip,
- 'service_password': u.not_null,
- 'service_port': "5000",
- 'service_protocol': 'http',
- 'service_tenant': 'services',
- 'service_tenant_id': u.not_null,
- 'service_username': 'ceilometer',
- }
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('keystone identity-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_202_keystone_ceilometer_identity_notes_relation(self):
- """Verify ceilometer to keystone identity-notifications relation"""
- u.log.debug('Checking keystone:ceilometer '
- 'identity-notifications relation data...')
-
- # Relation data may vary depending on timing of hooks and relations.
- # May be glance- or keystone- or another endpoint-changed value, so
- # check that at least one ???-endpoint-changed value exists.
- unit = self.keystone_sentry
- relation_data = unit.relation('identity-service',
- 'ceilometer:identity-notifications')
-
- expected = '-endpoint-changed'
- found = 0
- for key in relation_data.keys():
- if expected in key and relation_data[key]:
- found += 1
- u.log.debug('{}: {}'.format(key, relation_data[key]))
-
- if not found:
- message = ('keystone:ceilometer identity-notification relation '
- 'error\n expected something like: {}\n actual: '
- '{}'.format(expected, relation_data))
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_203_ceilometer_amqp_relation(self):
- """Verify the ceilometer to rabbitmq-server amqp relation data"""
- u.log.debug('Checking ceilometer:rabbitmq amqp relation data...')
- unit = self.ceil_sentry
- relation = ['amqp', 'rabbitmq-server:amqp']
- expected = {
- 'username': 'ceilometer',
- 'private-address': u.valid_ip,
- 'vhost': 'openstack'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_204_amqp_ceilometer_relation(self):
- """Verify the rabbitmq-server to ceilometer amqp relation data"""
- u.log.debug('Checking rabbitmq:ceilometer amqp relation data...')
- unit = self.rabbitmq_sentry
- relation = ['amqp', 'ceilometer:amqp']
- expected = {
- 'hostname': u.valid_ip,
- 'private-address': u.valid_ip,
- 'password': u.not_null,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('rabbitmq amqp', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_205_ceilometer_to_mongodb_relation(self):
- """Verify the ceilometer to mongodb relation data"""
- u.log.debug('Checking ceilometer:mongodb relation data...')
- unit = self.ceil_sentry
- relation = ['shared-db', 'mongodb:database']
- expected = {
- 'ceilometer_database': 'ceilometer',
- 'private-address': u.valid_ip,
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer shared-db', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_206_mongodb_to_ceilometer_relation(self):
- """Verify the mongodb to ceilometer relation data"""
- u.log.debug('Checking mongodb:ceilometer relation data...')
- unit = self.mongodb_sentry
- relation = ['database', 'ceilometer:shared-db']
- expected = {
- 'hostname': u.valid_ip,
- 'port': '27017',
- 'private-address': u.valid_ip,
- 'type': 'database',
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('mongodb database', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_207_ceilometer_ceilometer_agent_relation(self):
- """Verify the ceilometer to ceilometer-agent relation data"""
- u.log.debug('Checking ceilometer:ceilometer-agent relation data...')
- unit = self.ceil_sentry
- relation = ['ceilometer-service',
- 'ceilometer-agent:ceilometer-service']
- expected = {
- 'rabbitmq_user': 'ceilometer',
- 'verbose': 'False',
- 'rabbitmq_host': u.valid_ip,
- 'service_ports': "{'ceilometer_api': [8777, 8767]}",
- 'use_syslog': 'False',
- 'metering_secret': u.not_null,
- 'rabbitmq_virtual_host': 'openstack',
- 'db_port': '27017',
- 'private-address': u.valid_ip,
- 'db_name': 'ceilometer',
- 'db_host': u.valid_ip,
- 'debug': 'False',
- 'rabbitmq_password': u.not_null,
- 'port': '8767'
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_208_ceilometer_agent_ceilometer_relation(self):
- """Verify the ceilometer-agent to ceilometer relation data"""
- u.log.debug('Checking ceilometer-agent:ceilometer relation data...')
- unit = self.ceil_agent_sentry
- relation = ['ceilometer-service', 'ceilometer:ceilometer-service']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_209_nova_compute_ceilometer_agent_relation(self):
- """Verify the nova-compute to ceilometer relation data"""
- u.log.debug('Checking nova-compute:ceilometer relation data...')
- unit = self.nova_sentry
- relation = ['nova-ceilometer', 'ceilometer-agent:nova-ceilometer']
- expected = {'private-address': u.valid_ip}
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_210_ceilometer_agent_nova_compute_relation(self):
- """Verify the ceilometer to nova-compute relation data"""
- u.log.debug('Checking ceilometer:nova-compute relation data...')
- unit = self.ceil_agent_sentry
- relation = ['nova-ceilometer', 'nova-compute:nova-ceilometer']
- sub = ('{"nova": {"/etc/nova/nova.conf": {"sections": {"DEFAULT": '
- '[["instance_usage_audit", "True"], '
- '["instance_usage_audit_period", "hour"], '
- '["notify_on_state_change", "vm_and_task_state"], '
- '["notification_driver", "ceilometer.compute.nova_notifier"], '
- '["notification_driver", '
- '"nova.openstack.common.notifier.rpc_notifier"]]}}}}')
- expected = {
- 'subordinate_configuration': sub,
- 'private-address': u.valid_ip
- }
-
- ret = u.validate_relation_data(unit, relation, expected)
- if ret:
- message = u.relation_error('ceilometer-service', ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_300_ceilometer_config(self):
- """Verify the data in the ceilometer config file."""
- u.log.debug('Checking ceilometer config file data...')
- unit = self.ceil_sentry
- ks_rel = self.keystone_sentry.relation('identity-service',
- 'ceilometer:identity-service')
- auth_uri = '%s://%s:%s/' % (ks_rel['service_protocol'],
- ks_rel['service_host'],
- ks_rel['service_port'])
- db_relation = self.mongodb_sentry.relation('database',
- 'ceilometer:shared-db')
- db_conn = 'mongodb://%s:%s/ceilometer' % (db_relation['hostname'],
- db_relation['port'])
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- },
- 'api': {
- 'port': '8767',
- },
- 'service_credentials': {
- 'os_auth_url': auth_uri + 'v2.0',
- 'os_tenant_name': 'services',
- 'os_username': 'ceilometer',
- 'os_password': ks_rel['service_password'],
- },
- 'database': {
- 'connection': db_conn,
- },
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_301_nova_config(self):
- """Verify data in the nova compute nova config file"""
- u.log.debug('Checking nova compute config file...')
- unit = self.nova_sentry
- conf = '/etc/nova/nova.conf'
- expected = {
- 'DEFAULT': {
- 'verbose': 'False',
- 'debug': 'False',
- 'use_syslog': 'False',
- 'my_ip': u.valid_ip,
- }
- }
-
- # NOTE(beisner): notification_driver is not checked like the
- # others, as configparser does not support duplicate config
- # options, and dicts cant have duplicate keys.
- # Ex. from conf file:
- # notification_driver = ceilometer.compute.nova_notifier
- # notification_driver = nova.openstack.common.notifier.rpc_notifier
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- # Check notification_driver existence via simple grep cmd
- lines = [('notification_driver = '
- 'ceilometer.compute.nova_notifier'),
- ('notification_driver = '
- 'nova.openstack.common.notifier.rpc_notifier')]
-
- sentry_units = [unit]
- cmds = []
- for line in lines:
- cmds.append('grep "{}" {}'.format(line, conf))
-
- ret = u.check_commands_on_units(cmds, sentry_units)
- if ret:
- amulet.raise_status(amulet.FAIL, msg=ret)
-
- u.log.debug('OK')
-
- def test_302_nova_ceilometer_config(self):
- """Verify data in the ceilometer config file on the
- nova-compute (ceilometer-agent) unit."""
- u.log.debug('Checking nova ceilometer config file...')
- unit = self.nova_sentry
- conf = '/etc/ceilometer/ceilometer.conf'
- expected = {
- 'DEFAULT': {
- 'logdir': '/var/log/ceilometer'
- },
- 'database': {
- 'backend': 'sqlalchemy',
- 'connection': 'sqlite:////var/lib/ceilometer/$sqlite_db'
- }
- }
-
- for section, pairs in expected.iteritems():
- ret = u.validate_config_data(unit, conf, section, pairs)
- if ret:
- message = "ceilometer config error: {}".format(ret)
- amulet.raise_status(amulet.FAIL, msg=message)
-
- u.log.debug('OK')
-
- def test_400_api_connection(self):
- """Simple api calls to check service is up and responding"""
- u.log.debug('Checking api functionality...')
- assert(self.ceil.samples.list() == [])
- assert(self.ceil.meters.list() == [])
- u.log.debug('OK')
-
- # NOTE(beisner): need to add more functional tests
-
- def test_900_restart_on_config_change(self):
- """Verify that the specified services are restarted when the config
- is changed.
- """
- sentry = self.ceil_sentry
- juju_service = 'ceilometer'
-
- # Expected default and alternate values
- set_default = {'debug': 'False'}
- set_alternate = {'debug': 'True'}
-
- # Services which are expected to restart upon config change,
- # and corresponding config files affected by the change
- conf_file = '/etc/ceilometer/ceilometer.conf'
- services = {
- 'ceilometer-collector': conf_file,
- 'ceilometer-api': conf_file,
- 'ceilometer-agent-notification': conf_file,
- }
-
- if self._get_openstack_release() < self.trusty_mitaka:
- services['ceilometer-alarm-notifier'] = conf_file
- services['ceilometer-alarm-evaluator'] = conf_file
-
- if self._get_openstack_release() == self.trusty_liberty or \
- self._get_openstack_release() >= self.wily_liberty:
- # Liberty and later
- services['ceilometer-polling'] = conf_file
- else:
- # Juno and earlier
- services['ceilometer-agent-central'] = conf_file
-
- # Make config change, check for service restarts
- u.log.debug('Making config change on {}...'.format(juju_service))
- mtime = u.get_sentry_time(sentry)
- self.d.configure(juju_service, set_alternate)
-
- sleep_time = 40
- for s, conf_file in services.iteritems():
- u.log.debug("Checking that service restarted: {}".format(s))
- if not u.validate_service_config_changed(sentry, mtime, s,
- conf_file,
- retry_count=4,
- retry_sleep_time=20,
- sleep_time=sleep_time):
- self.d.configure(juju_service, set_default)
- msg = "service {} didn't restart after config change".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
- sleep_time = 0
-
- self.d.configure(juju_service, set_default)
- u.log.debug('OK')
-
- def test_910_pause_and_resume(self):
- """The services can be paused and resumed. """
- u.log.debug('Checking pause and resume actions...')
- unit_name = "ceilometer/0"
- unit = self.d.sentry.unit[unit_name]
- juju_service = 'ceilometer'
-
- assert u.status_get(unit)[0] == "active"
-
- action_id = self._run_action(unit_name, "pause")
- assert self._wait_on_action(action_id), "Pause action failed."
- assert u.status_get(unit)[0] == "maintenance"
-
- # trigger config-changed to ensure that services are still stopped
- u.log.debug("Making config change on ceilometer ...")
- self.d.configure(juju_service, {'debug': 'True'})
- assert u.status_get(unit)[0] == "maintenance"
- self.d.configure(juju_service, {'debug': 'False'})
- assert u.status_get(unit)[0] == "maintenance"
-
- action_id = self._run_action(unit_name, "resume")
- assert self._wait_on_action(action_id), "Resume action failed."
- assert u.status_get(unit)[0] == "active"
- u.log.debug('OK')
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py
deleted file mode 100644
index d451698..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/deployment.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import os
-import six
-
-
-class AmuletDeployment(object):
- """Amulet deployment.
-
- This class provides generic Amulet deployment and test runner
- methods.
- """
-
- def __init__(self, series=None):
- """Initialize the deployment environment."""
- self.series = None
-
- if series:
- self.series = series
- self.d = amulet.Deployment(series=self.series)
- else:
- self.d = amulet.Deployment()
-
- def _add_services(self, this_service, other_services):
- """Add services.
-
- Add services to the deployment where this_service is the local charm
- that we're testing and other_services are the other services that
- are being used in the local amulet tests.
- """
- if this_service['name'] != os.path.basename(os.getcwd()):
- s = this_service['name']
- msg = "The charm's root directory name needs to be {}".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- if 'units' not in this_service:
- this_service['units'] = 1
-
- self.d.add(this_service['name'], units=this_service['units'],
- constraints=this_service.get('constraints'))
-
- for svc in other_services:
- if 'location' in svc:
- branch_location = svc['location']
- elif self.series:
- branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
- else:
- branch_location = None
-
- if 'units' not in svc:
- svc['units'] = 1
-
- self.d.add(svc['name'], charm=branch_location, units=svc['units'],
- constraints=svc.get('constraints'))
-
- def _add_relations(self, relations):
- """Add all of the relations for the services."""
- for k, v in six.iteritems(relations):
- self.d.relate(k, v)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _deploy(self):
- """Deploy environment and wait for all hooks to finish executing."""
- try:
- self.d.setup(timeout=900)
- self.d.sentry.wait(timeout=900)
- except amulet.helpers.TimeoutError:
- amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
- except Exception:
- raise
-
- def run_tests(self):
- """Run all of the methods that are prefixed with 'test_'."""
- for test in dir(self):
- if test.startswith('test_'):
- getattr(self, test)()
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py
deleted file mode 100644
index 7e5c25a..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/amulet/utils.py
+++ /dev/null
@@ -1,829 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import json
-import logging
-import os
-import re
-import socket
-import subprocess
-import sys
-import time
-import uuid
-
-import amulet
-import distro_info
-import six
-from six.moves import configparser
-if six.PY3:
- from urllib import parse as urlparse
-else:
- import urlparse
-
-
-class AmuletUtils(object):
- """Amulet utilities.
-
- This class provides common utility functions that are used by Amulet
- tests.
- """
-
- def __init__(self, log_level=logging.ERROR):
- self.log = self.get_logger(level=log_level)
- self.ubuntu_releases = self.get_ubuntu_releases()
-
- def get_logger(self, name="amulet-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def valid_ip(self, ip):
- if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
- return True
- else:
- return False
-
- def valid_url(self, url):
- p = re.compile(
- r'^(?:http|ftp)s?://'
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
- r'localhost|'
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
- r'(?::\d+)?'
- r'(?:/?|[/?]\S+)$',
- re.IGNORECASE)
- if p.match(url):
- return True
- else:
- return False
-
- def get_ubuntu_release_from_sentry(self, sentry_unit):
- """Get Ubuntu release codename from sentry unit.
-
- :param sentry_unit: amulet sentry/service unit pointer
- :returns: list of strings - release codename, failure message
- """
- msg = None
- cmd = 'lsb_release -cs'
- release, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} lsb_release: {}'.format(
- sentry_unit.info['unit_name'], release))
- else:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, release, code))
- if release not in self.ubuntu_releases:
- msg = ("Release ({}) not found in Ubuntu releases "
- "({})".format(release, self.ubuntu_releases))
- return release, msg
-
- def validate_services(self, commands):
- """Validate that lists of commands succeed on service units. Can be
- used to verify system services are running on the corresponding
- service units.
-
- :param commands: dict with sentry keys and arbitrary command list vals
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # /!\ DEPRECATION WARNING (beisner):
- # New and existing tests should be rewritten to use
- # validate_services_by_name() as it is aware of init systems.
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_services_by_name instead of validate_services '
- 'due to init system differences.')
-
- for k, v in six.iteritems(commands):
- for cmd in v:
- output, code = k.run(cmd)
- self.log.debug('{} `{}` returned '
- '{}'.format(k.info['unit_name'],
- cmd, code))
- if code != 0:
- return "command `{}` returned {}".format(cmd, str(code))
- return None
-
- def validate_services_by_name(self, sentry_services):
- """Validate system service status by service name, automatically
- detecting init system based on Ubuntu release codename.
-
- :param sentry_services: dict with sentry keys and svc list values
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # Point at which systemd became a thing
- systemd_switch = self.ubuntu_releases.index('vivid')
-
- for sentry_unit, services_list in six.iteritems(sentry_services):
- # Get lsb_release codename from unit
- release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
- if ret:
- return ret
-
- for service_name in services_list:
- if (self.ubuntu_releases.index(release) >= systemd_switch or
- service_name in ['rabbitmq-server', 'apache2']):
- # init is systemd (or regular sysv)
- cmd = 'sudo service {} status'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0
- elif self.ubuntu_releases.index(release) < systemd_switch:
- # init is upstart
- cmd = 'sudo status {}'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0 and "start/running" in output
-
- self.log.debug('{} `{}` returned '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code))
- if not service_running:
- return u"command `{}` returned {} {}".format(
- cmd, output, str(code))
- return None
-
- def _get_config(self, unit, filename):
- """Get a ConfigParser object for parsing a unit's config file."""
- file_contents = unit.file_contents(filename)
-
- # NOTE(beisner): by default, ConfigParser does not handle options
- # with no value, such as the flags used in the mysql my.cnf file.
- # https://bugs.python.org/issue7005
- config = configparser.ConfigParser(allow_no_value=True)
- config.readfp(io.StringIO(file_contents))
- return config
-
- def validate_config_data(self, sentry_unit, config_file, section,
- expected):
- """Validate config file data.
-
- Verify that the specified section of the config file contains
- the expected option key:value pairs.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('Validating config file data ({} in {} on {})'
- '...'.format(section, config_file,
- sentry_unit.info['unit_name']))
- config = self._get_config(sentry_unit, config_file)
-
- if section != 'DEFAULT' and not config.has_section(section):
- return "section [{}] does not exist".format(section)
-
- for k in expected.keys():
- if not config.has_option(section, k):
- return "section [{}] is missing option {}".format(section, k)
-
- actual = config.get(section, k)
- v = expected[k]
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if actual != v:
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual):
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- return None
-
- def _validate_dict_data(self, expected, actual):
- """Validate dictionary data.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('actual: {}'.format(repr(actual)))
- self.log.debug('expected: {}'.format(repr(expected)))
-
- for k, v in six.iteritems(expected):
- if k in actual:
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if v != actual[k]:
- return "{}:{}".format(k, actual[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual[k]):
- return "{}:{}".format(k, actual[k])
- else:
- return "key '{}' does not exist".format(k)
- return None
-
- def validate_relation_data(self, sentry_unit, relation, expected):
- """Validate actual relation data based on expected relation data."""
- actual = sentry_unit.relation(relation[0], relation[1])
- return self._validate_dict_data(expected, actual)
-
- def _validate_list_data(self, expected, actual):
- """Compare expected list vs actual list data."""
- for e in expected:
- if e not in actual:
- return "expected item {} not found in actual list".format(e)
- return None
-
- def not_null(self, string):
- if string is not None:
- return True
- else:
- return False
-
- def _get_file_mtime(self, sentry_unit, filename):
- """Get last modification time of file."""
- return sentry_unit.file_stat(filename)['mtime']
-
- def _get_dir_mtime(self, sentry_unit, directory):
- """Get last modification time of directory."""
- return sentry_unit.directory_stat(directory)['mtime']
-
- def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
- """Get start time of a process based on the last modification time
- of the /proc/pid directory.
-
- :sentry_unit: The sentry unit to check for the service on
- :service: service name to look for in process table
- :pgrep_full: [Deprecated] Use full command line search mode with pgrep
- :returns: epoch time of service process start
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- if pgrep_full is not None:
- # /!\ DEPRECATION WARNING (beisner):
- # No longer implemented, as pidof is now used instead of pgrep.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
- 'longer implemented re: lp 1474030.')
-
- pid_list = self.get_process_id_list(sentry_unit, service)
- pid = pid_list[0]
- proc_dir = '/proc/{}'.format(pid)
- self.log.debug('Pid for {} on {}: {}'.format(
- service, sentry_unit.info['unit_name'], pid))
-
- return self._get_dir_mtime(sentry_unit, proc_dir)
-
- def service_restarted(self, sentry_unit, service, filename,
- pgrep_full=None, sleep_time=20):
- """Check if service was restarted.
-
- Compare a service's start time vs a file's last modification time
- (such as a config file for that service) to determine if the service
- has been restarted.
- """
- # /!\ DEPRECATION WARNING (beisner):
- # This method is prone to races in that no before-time is known.
- # Use validate_service_config_changed instead.
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_service_config_changed instead of '
- 'service_restarted due to known races.')
-
- time.sleep(sleep_time)
- if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
- self._get_file_mtime(sentry_unit, filename)):
- return True
- else:
- return False
-
- def service_restarted_since(self, sentry_unit, mtime, service,
- pgrep_full=None, sleep_time=20,
- retry_count=30, retry_sleep_time=10):
- """Check if service was been started after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if service found and its start time it newer than mtime,
- False if service is older than mtime or if service was
- not found.
- """
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s service restarted since %s on '
- '%s' % (service, mtime, unit_name))
- time.sleep(sleep_time)
- proc_start_time = None
- tries = 0
- while tries <= retry_count and not proc_start_time:
- try:
- proc_start_time = self._get_proc_start_time(sentry_unit,
- service,
- pgrep_full)
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'OK'.format(tries, service, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, proc may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'failed\n{}'.format(tries, service,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not proc_start_time:
- self.log.warn('No proc start time found, assuming service did '
- 'not start')
- return False
- if proc_start_time >= mtime:
- self.log.debug('Proc start time is newer than provided mtime'
- '(%s >= %s) on %s (OK)' % (proc_start_time,
- mtime, unit_name))
- return True
- else:
- self.log.warn('Proc start time (%s) is older than provided mtime '
- '(%s) on %s, service did not '
- 'restart' % (proc_start_time, mtime, unit_name))
- return False
-
- def config_updated_since(self, sentry_unit, filename, mtime,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check if file was modified after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check the file mtime on
- filename (string): The file to check mtime of
- mtime (float): The epoch time to check against
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if file was modified more recently than mtime, False if
- file was modified before mtime, or if file not found.
- """
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s updated since %s on '
- '%s' % (filename, mtime, unit_name))
- time.sleep(sleep_time)
- file_mtime = None
- tries = 0
- while tries <= retry_count and not file_mtime:
- try:
- file_mtime = self._get_file_mtime(sentry_unit, filename)
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'OK'.format(tries, filename, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, file may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'failed\n{}'.format(tries, filename,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not file_mtime:
- self.log.warn('Could not determine file mtime, assuming '
- 'file does not exist')
- return False
-
- if file_mtime >= mtime:
- self.log.debug('File mtime is newer than provided mtime '
- '(%s >= %s) on %s (OK)' % (file_mtime,
- mtime, unit_name))
- return True
- else:
- self.log.warn('File mtime is older than provided mtime'
- '(%s < on %s) on %s' % (file_mtime,
- mtime, unit_name))
- return False
-
- def validate_service_config_changed(self, sentry_unit, mtime, service,
- filename, pgrep_full=None,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check service and file were updated after mtime
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- filename (string): The file to check mtime of
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep in seconds to pass to test helpers
- retry_count (int): If service is not found, how many times to retry
- retry_sleep_time (int): Time in seconds to wait between retries
-
- Typical Usage:
- u = OpenStackAmuletUtils(ERROR)
- ...
- mtime = u.get_sentry_time(self.cinder_sentry)
- self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
- if not u.validate_service_config_changed(self.cinder_sentry,
- mtime,
- 'cinder-api',
- '/etc/cinder/cinder.conf')
- amulet.raise_status(amulet.FAIL, msg='update failed')
- Returns:
- bool: True if both service and file where updated/restarted after
- mtime, False if service is older than mtime or if service was
- not found or if filename was modified before mtime.
- """
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- service_restart = self.service_restarted_since(
- sentry_unit, mtime,
- service,
- pgrep_full=pgrep_full,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- config_update = self.config_updated_since(
- sentry_unit,
- filename,
- mtime,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- return service_restart and config_update
-
- def get_sentry_time(self, sentry_unit):
- """Return current epoch time on a sentry"""
- cmd = "date +'%s'"
- return float(sentry_unit.run(cmd)[0])
-
- def relation_error(self, name, data):
- return 'unexpected relation data in {} - {}'.format(name, data)
-
- def endpoint_error(self, name, data):
- return 'unexpected endpoint data in {} - {}'.format(name, data)
-
- def get_ubuntu_releases(self):
- """Return a list of all Ubuntu releases in order of release."""
- _d = distro_info.UbuntuDistroInfo()
- _release_list = _d.all
- return _release_list
-
- def file_to_url(self, file_rel_path):
- """Convert a relative file path to a file URL."""
- _abs_path = os.path.abspath(file_rel_path)
- return urlparse.urlparse(_abs_path, scheme='file').geturl()
-
- def check_commands_on_units(self, commands, sentry_units):
- """Check that all commands in a list exit zero on all
- sentry units in a list.
-
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- self.log.debug('Checking exit codes for {} commands on {} '
- 'sentry units...'.format(len(commands),
- len(sentry_units)))
- for sentry_unit in sentry_units:
- for cmd in commands:
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- return ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- return None
-
- def get_process_id_list(self, sentry_unit, process_name,
- expect_success=True):
- """Get a list of process ID(s) from a single sentry juju unit
- for a single process name.
-
- :param sentry_unit: Amulet sentry instance (juju unit)
- :param process_name: Process name
- :param expect_success: If False, expect the PID to be missing,
- raise if it is present.
- :returns: List of process IDs
- """
- cmd = 'pidof -x {}'.format(process_name)
- if not expect_success:
- cmd += " || exit 0 && exit 1"
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output).split()
-
- def get_unit_process_ids(self, unit_processes, expect_success=True):
- """Construct a dict containing unit sentries, process names, and
- process IDs.
-
- :param unit_processes: A dictionary of Amulet sentry instance
- to list of process names.
- :param expect_success: if False expect the processes to not be
- running, raise if they are.
- :returns: Dictionary of Amulet sentry instance to dictionary
- of process names to PIDs.
- """
- pid_dict = {}
- for sentry_unit, process_list in six.iteritems(unit_processes):
- pid_dict[sentry_unit] = {}
- for process in process_list:
- pids = self.get_process_id_list(
- sentry_unit, process, expect_success=expect_success)
- pid_dict[sentry_unit].update({process: pids})
- return pid_dict
-
- def validate_unit_process_ids(self, expected, actual):
- """Validate process id quantities for services on units."""
- self.log.debug('Checking units for running processes...')
- self.log.debug('Expected PIDs: {}'.format(expected))
- self.log.debug('Actual PIDs: {}'.format(actual))
-
- if len(actual) != len(expected):
- return ('Unit count mismatch. expected, actual: {}, '
- '{} '.format(len(expected), len(actual)))
-
- for (e_sentry, e_proc_names) in six.iteritems(expected):
- e_sentry_name = e_sentry.info['unit_name']
- if e_sentry in actual.keys():
- a_proc_names = actual[e_sentry]
- else:
- return ('Expected sentry ({}) not found in actual dict data.'
- '{}'.format(e_sentry_name, e_sentry))
-
- if len(e_proc_names.keys()) != len(a_proc_names.keys()):
- return ('Process name count mismatch. expected, actual: {}, '
- '{}'.format(len(expected), len(actual)))
-
- for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
- zip(e_proc_names.items(), a_proc_names.items()):
- if e_proc_name != a_proc_name:
- return ('Process name mismatch. expected, actual: {}, '
- '{}'.format(e_proc_name, a_proc_name))
-
- a_pids_length = len(a_pids)
- fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
- '{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids_length,
- a_pids))
-
- # If expected is a list, ensure at least one PID quantity match
- if isinstance(e_pids, list) and \
- a_pids_length not in e_pids:
- return fail_msg
- # If expected is not bool and not list,
- # ensure PID quantities match
- elif not isinstance(e_pids, bool) and \
- not isinstance(e_pids, list) and \
- a_pids_length != e_pids:
- return fail_msg
- # If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is True and a_pids_length < 1:
- return fail_msg
- # If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is False and a_pids_length != 0:
- return fail_msg
- else:
- self.log.debug('PID check OK: {} {} {}: '
- '{}'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids))
- return None
-
- def validate_list_of_identical_dicts(self, list_of_dicts):
- """Check that all dicts within a list are identical."""
- hashes = []
- for _dict in list_of_dicts:
- hashes.append(hash(frozenset(_dict.items())))
-
- self.log.debug('Hashes: {}'.format(hashes))
- if len(set(hashes)) == 1:
- self.log.debug('Dicts within list are identical')
- else:
- return 'Dicts within list are not identical'
-
- return None
-
- def validate_sectionless_conf(self, file_contents, expected):
- """A crude conf parser. Useful to inspect configuration files which
- do not have section headers (as would be necessary in order to use
- the configparser). Such as openstack-dashboard or rabbitmq confs."""
- for line in file_contents.split('\n'):
- if '=' in line:
- args = line.split('=')
- if len(args) <= 1:
- continue
- key = args[0].strip()
- value = args[1].strip()
- if key in expected.keys():
- if expected[key] != value:
- msg = ('Config mismatch. Expected, actual: {}, '
- '{}'.format(expected[key], value))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def get_unit_hostnames(self, units):
- """Return a dict of juju unit names to hostnames."""
- host_names = {}
- for unit in units:
- host_names[unit.info['unit_name']] = \
- str(unit.file_contents('/etc/hostname').strip())
- self.log.debug('Unit host names: {}'.format(host_names))
- return host_names
-
- def run_cmd_unit(self, sentry_unit, cmd):
- """Run a command on a unit, return the output and exit code."""
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` command returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- msg = ('{} `{}` command returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output), code
-
- def file_exists_on_unit(self, sentry_unit, file_name):
- """Check if a file exists on a unit."""
- try:
- sentry_unit.file_stat(file_name)
- return True
- except IOError:
- return False
- except Exception as e:
- msg = 'Error checking file {}: {}'.format(file_name, e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def file_contents_safe(self, sentry_unit, file_name,
- max_wait=60, fatal=False):
- """Get file contents from a sentry unit. Wrap amulet file_contents
- with retry logic to address races where a file checks as existing,
- but no longer exists by the time file_contents is called.
- Return None if file not found. Optionally raise if fatal is True."""
- unit_name = sentry_unit.info['unit_name']
- file_contents = False
- tries = 0
- while not file_contents and tries < (max_wait / 4):
- try:
- file_contents = sentry_unit.file_contents(file_name)
- except IOError:
- self.log.debug('Attempt {} to open file {} from {} '
- 'failed'.format(tries, file_name,
- unit_name))
- time.sleep(4)
- tries += 1
-
- if file_contents:
- return file_contents
- elif not fatal:
- return None
- elif fatal:
- msg = 'Failed to get file contents from unit.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def port_knock_tcp(self, host="localhost", port=22, timeout=15):
- """Open a TCP socket to check for a listening sevice on a host.
-
- :param host: host name or IP address, default to localhost
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :returns: True if successful, False if connect failed
- """
-
- # Resolve host name if possible
- try:
- connect_host = socket.gethostbyname(host)
- host_human = "{} ({})".format(connect_host, host)
- except socket.error as e:
- self.log.warn('Unable to resolve address: '
- '{} ({}) Trying anyway!'.format(host, e))
- connect_host = host
- host_human = connect_host
-
- # Attempt socket connection
- try:
- knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- knock.settimeout(timeout)
- knock.connect((connect_host, port))
- knock.close()
- self.log.debug('Socket connect OK for host '
- '{} on port {}.'.format(host_human, port))
- return True
- except socket.error as e:
- self.log.debug('Socket connect FAIL for'
- ' {} port {} ({})'.format(host_human, port, e))
- return False
-
- def port_knock_units(self, sentry_units, port=22,
- timeout=15, expect_success=True):
- """Open a TCP socket to check for a listening sevice on each
- listed juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :expect_success: True by default, set False to invert logic
- :returns: None if successful, Failure message otherwise
- """
- for unit in sentry_units:
- host = unit.info['public-address']
- connected = self.port_knock_tcp(host, port, timeout)
- if not connected and expect_success:
- return 'Socket connect failed.'
- elif connected and not expect_success:
- return 'Socket connected unexpectedly.'
-
- def get_uuid_epoch_stamp(self):
- """Returns a stamp string based on uuid4 and epoch time. Useful in
- generating test messages which need to be unique-ish."""
- return '[{}-{}]'.format(uuid.uuid4(), time.time())
-
-# amulet juju action helpers:
- def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output,
- params=None):
- """Run the named action on a given unit sentry.
-
- params a dict of parameters to use
- _check_output parameter is used for dependency injection.
-
- @return action_id.
- """
- unit_id = unit_sentry.info["unit_name"]
- command = ["juju", "action", "do", "--format=json", unit_id, action]
- if params is not None:
- for key, value in params.iteritems():
- command.append("{}={}".format(key, value))
- self.log.info("Running command: %s\n" % " ".join(command))
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- action_id = data[u'Action queued with id']
- return action_id
-
- def wait_on_action(self, action_id, _check_output=subprocess.check_output):
- """Wait for a given action, returning if it completed or not.
-
- _check_output parameter is used for dependency injection.
- """
- command = ["juju", "action", "fetch", "--format=json", "--wait=0",
- action_id]
- output = _check_output(command, universal_newlines=True)
- data = json.loads(output)
- return data.get(u"status") == "completed"
-
- def status_get(self, unit):
- """Return the current service status of this unit."""
- raw_status, return_code = unit.run(
- "status-get --format=json --include-data")
- if return_code != 0:
- return ("unknown", "")
- status = json.loads(raw_status)
- return (status["status"], status["message"])
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index d21c9c7..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import logging
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
- 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
- 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- self.log.info('Waiting for extended status on units...')
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty, self.trusty_mitaka,
- self.xenial_mitaka) = range(14)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty,
- ('xenial', None): self.xenial_mitaka}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ('xenial', 'mitaka'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index ef3bdcc..0000000
--- a/charms/trusty/ceilometer/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1012 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-from keystoneclient.auth.identity import v3 as keystone_id_v3
-from keystoneclient import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-
-import novaclient.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- if not keystone_ip:
- keystone_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
- if not api_version or api_version == 2:
- ep = base_ep + "/v2.0"
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
- else:
- ep = base_ep + "/v3"
- auth = keystone_id_v3.Password(
- user_domain_name='admin_domain',
- username=user,
- password=password,
- domain_name='admin_domain',
- auth_url=ep,
- )
- sess = keystone_session.Session(auth=auth)
- return keystone_client_v3.Client(session=sess)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/ceilometer/tests/setup/00-setup b/charms/trusty/ceilometer/tests/setup/00-setup
deleted file mode 100755
index 658eb60..0000000
--- a/charms/trusty/ceilometer/tests/setup/00-setup
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -ex
-
-sudo add-apt-repository --yes ppa:juju/stable
-sudo apt-get update --yes
-sudo apt-get install --yes amulet \
- distro-info-data \
- python-ceilometerclient \
- python-cinderclient \
- python-distro-info \
- python-glanceclient \
- python-heatclient \
- python-keystoneclient \
- python-neutronclient \
- python-novaclient \
- python-pika \
- python-swiftclient
diff --git a/charms/trusty/ceilometer/tests/tests.yaml b/charms/trusty/ceilometer/tests/tests.yaml
deleted file mode 100644
index 4d17631..0000000
--- a/charms/trusty/ceilometer/tests/tests.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-bootstrap: true
-reset: false
-virtualenv: true
-makefile:
- - lint
- - test
-sources:
- - ppa:juju/stable
-packages:
- - amulet
- - distro-info-data
- - python-ceilometerclient
- - python-cinderclient
- - python-distro-info
- - python-glanceclient
- - python-heatclient
- - python-keystoneclient
- - python-neutronclient
- - python-novaclient
- - python-pika
- - python-swiftclient
diff --git a/charms/trusty/ceilometer/tox.ini b/charms/trusty/ceilometer/tox.ini
deleted file mode 100644
index c051dba..0000000
--- a/charms/trusty/ceilometer/tox.ini
+++ /dev/null
@@ -1,29 +0,0 @@
-[tox]
-envlist = pep8,py27
-skipsdist = True
-
-[testenv]
-setenv = VIRTUAL_ENV={envdir}
- PYTHONHASHSEED=0
-install_command =
- pip install --allow-unverified python-apt {opts} {packages}
-commands = ostestr {posargs}
-
-[testenv:py27]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-
-[testenv:pep8]
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests actions
- charm-proof
-
-[testenv:venv]
-commands = {posargs}
-
-[flake8]
-ignore = E402,E226
-exclude = hooks/charmhelpers
diff --git a/charms/trusty/ceilometer/unit_tests/__init__.py b/charms/trusty/ceilometer/unit_tests/__init__.py
deleted file mode 100644
index 53a4570..0000000
--- a/charms/trusty/ceilometer/unit_tests/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import sys
-sys.path.append('actions')
-sys.path.append('hooks')
diff --git a/charms/trusty/ceilometer/unit_tests/test_actions.py b/charms/trusty/ceilometer/unit_tests/test_actions.py
deleted file mode 100644
index 67643c8..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_actions.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import mock
-from mock import patch
-
-from test_utils import CharmTestCase
-
-with patch('ceilometer_utils.register_configs') as configs:
- configs.return_value = 'test-config'
- import actions
-
-
-class PauseTestCase(CharmTestCase):
-
- def setUp(self):
- super(PauseTestCase, self).setUp(
- actions, ["pause_unit_helper"])
-
- def test_pauses_services(self):
- actions.pause([])
- self.pause_unit_helper.assert_called_once_with('test-config')
-
-
-class ResumeTestCase(CharmTestCase):
-
- def setUp(self):
- super(ResumeTestCase, self).setUp(
- actions, ["resume_unit_helper"])
-
- def test_pauses_services(self):
- actions.resume([])
- self.resume_unit_helper.assert_called_once_with('test-config')
-
-
-class MainTestCase(CharmTestCase):
-
- def setUp(self):
- super(MainTestCase, self).setUp(actions, ["action_fail"])
-
- def test_invokes_action(self):
- dummy_calls = []
-
- def dummy_action(args):
- dummy_calls.append(True)
-
- with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
- actions.main(["foo"])
- self.assertEqual(dummy_calls, [True])
-
- def test_unknown_action(self):
- """Unknown actions aren't a traceback."""
- exit_string = actions.main(["foo"])
- self.assertEqual("Action foo undefined", exit_string)
-
- def test_failing_action(self):
- """Actions which traceback trigger action_fail() calls."""
- dummy_calls = []
-
- self.action_fail.side_effect = dummy_calls.append
-
- def dummy_action(args):
- raise ValueError("uh oh")
-
- with mock.patch.dict(actions.ACTIONS, {"foo": dummy_action}):
- actions.main(["foo"])
- self.assertEqual(dummy_calls, ["uh oh"])
diff --git a/charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py b/charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py
deleted file mode 100644
index 3babe21..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_actions_openstack_upgrade.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import sys
-
-from mock import patch, MagicMock
-
-# python-apt is not installed as part of test-requirements but is imported by
-# some charmhelpers modules so create a fake import.
-mock_apt = MagicMock()
-sys.modules['apt'] = mock_apt
-mock_apt.apt_pkg = MagicMock()
-
-os.environ['JUJU_UNIT_NAME'] = 'ceilometer'
-
-with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
- mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
- lambda *args, **kwargs: f(*args, **kwargs))
- with patch('ceilometer_utils.register_configs') as register_configs:
- with patch('ceilometer_utils.ceilometer_release_services'):
- import openstack_upgrade
-
-from test_utils import (
- CharmTestCase
-)
-
-TO_PATCH = [
- 'config_changed',
- 'do_openstack_upgrade',
-]
-
-
-class TestCeilometerUpgradeActions(CharmTestCase):
-
- def setUp(self):
- super(TestCeilometerUpgradeActions, self).setUp(openstack_upgrade,
- TO_PATCH)
-
- @patch('charmhelpers.contrib.openstack.utils.juju_log')
- @patch('charmhelpers.contrib.openstack.utils.config')
- @patch('charmhelpers.contrib.openstack.utils.action_set')
- @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
- @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
- def test_openstack_upgrade_true(self, upgrade_avail, git_requested,
- action_set, config, log):
- git_requested.return_value = False
- upgrade_avail.return_value = True
- config.return_value = True
-
- openstack_upgrade.openstack_upgrade()
-
- self.assertTrue(self.do_openstack_upgrade.called)
- self.assertTrue(self.config_changed.called)
-
- @patch('charmhelpers.contrib.openstack.utils.juju_log')
- @patch('charmhelpers.contrib.openstack.utils.config')
- @patch('charmhelpers.contrib.openstack.utils.action_set')
- @patch('charmhelpers.contrib.openstack.utils.git_install_requested')
- @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available')
- def test_openstack_upgrade_false(self, upgrade_avail, git_requested,
- action_set, config, log):
- git_requested.return_value = False
- upgrade_avail.return_value = True
- config.return_value = False
-
- openstack_upgrade.openstack_upgrade()
-
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertFalse(self.config_changed.called)
diff --git a/charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py b/charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py
deleted file mode 100644
index 038dfd0..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_ceilometer_contexts.py
+++ /dev/null
@@ -1,164 +0,0 @@
-from mock import patch
-
-import ceilometer_contexts as contexts
-import ceilometer_utils as utils
-
-from test_utils import CharmTestCase, mock_open
-
-TO_PATCH = [
- 'config',
- 'relation_get',
- 'relation_ids',
- 'related_units',
- 'os_release',
-]
-
-
-class CeilometerContextsTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerContextsTest, self).setUp(contexts, TO_PATCH)
- self.config.side_effect = self.test_config.get
- self.relation_get.side_effect = self.test_relation.get
-
- def tearDown(self):
- super(CeilometerContextsTest, self).tearDown()
-
- def test_logging_context(self):
- self.test_config.set('debug', False)
- self.test_config.set('verbose', False)
- self.assertEquals(contexts.LoggingConfigContext()(),
- {'debug': False, 'verbose': False})
- self.test_config.set('debug', True)
- self.test_config.set('verbose', False)
- self.assertEquals(contexts.LoggingConfigContext()(),
- {'debug': True, 'verbose': False})
- self.test_config.set('debug', True)
- self.test_config.set('verbose', True)
- self.assertEquals(contexts.LoggingConfigContext()(),
- {'debug': True, 'verbose': True})
-
- def test_mongodb_context_not_related(self):
- self.relation_ids.return_value = []
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(), {})
-
- def test_mongodb_context_related(self):
- self.relation_ids.return_value = ['shared-db:0']
- self.related_units.return_value = ['mongodb/0']
- data = {
- 'hostname': 'mongodb',
- 'port': 8090
- }
- self.test_relation.set(data)
- self.assertEquals(contexts.MongoDBContext()(),
- {'db_host': 'mongodb', 'db_port': 8090,
- 'db_name': 'ceilometer'})
-
- def test_mongodb_context_related_replset_single_mongo(self):
- self.relation_ids.return_value = ['shared-db:0']
- self.related_units.return_value = ['mongodb/0']
- data = {
- 'hostname': 'mongodb-0',
- 'port': 8090,
- 'replset': 'replset-1'
- }
- self.test_relation.set(data)
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(),
- {'db_host': 'mongodb-0', 'db_port': 8090,
- 'db_name': 'ceilometer'})
-
- @patch.object(contexts, 'context_complete')
- def test_mongodb_context_related_replset_missing_values(self, mock_ctxcmp):
- mock_ctxcmp.return_value = False
- self.relation_ids.return_value = ['shared-db:0']
- self.related_units.return_value = ['mongodb/0']
- data = {
- 'hostname': None,
- 'port': 8090,
- 'replset': 'replset-1'
- }
- self.test_relation.set(data)
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(), {})
-
- def test_mongodb_context_related_replset_multiple_mongo(self):
- self.relation_ids.return_value = ['shared-db:0']
- related_units = {
- 'mongodb/0': {'hostname': 'mongodb-0',
- 'port': 8090,
- 'replset': 'replset-1'},
- 'mongodb/1': {'hostname': 'mongodb-1',
- 'port': 8090,
- 'replset': 'replset-1'}
- }
- self.related_units.return_value = [k for k in related_units.keys()]
-
- def relation_get(attr, unit, relid):
- values = related_units.get(unit)
- if attr is None:
- return values
- else:
- return values.get(attr, None)
- self.relation_get.side_effect = relation_get
-
- self.os_release.return_value = 'icehouse'
- self.assertEquals(contexts.MongoDBContext()(),
- {'db_mongo_servers': 'mongodb-0:8090,mongodb-1:8090',
- 'db_name': 'ceilometer', 'db_replset': 'replset-1'})
-
- @patch.object(utils, 'get_shared_secret')
- def test_ceilometer_context(self, secret):
- secret.return_value = 'mysecret'
- self.assertEquals(contexts.CeilometerContext()(), {
- 'port': 8777,
- 'metering_secret': 'mysecret',
- 'api_workers': 1,
- })
-
- def test_ceilometer_service_context(self):
- self.relation_ids.return_value = ['ceilometer-service:0']
- self.related_units.return_value = ['ceilometer/0']
- data = {
- 'metering_secret': 'mysecret',
- 'keystone_host': 'test'
- }
- self.test_relation.set(data)
- self.assertEquals(contexts.CeilometerServiceContext()(), data)
-
- def test_ceilometer_service_context_not_related(self):
- self.relation_ids.return_value = []
- self.assertEquals(contexts.CeilometerServiceContext()(), {})
-
- @patch('os.path.exists')
- def test_get_shared_secret_existing(self, exists):
- exists.return_value = True
- with mock_open(utils.SHARED_SECRET, u'mysecret'):
- self.assertEquals(utils.get_shared_secret(),
- 'mysecret')
-
- @patch('uuid.uuid4')
- @patch('os.path.exists')
- def test_get_shared_secret_new(self, exists, uuid4):
- exists.return_value = False
- uuid4.return_value = 'newsecret'
- with patch('__builtin__.open'):
- self.assertEquals(utils.get_shared_secret(),
- 'newsecret')
-
- @patch.object(contexts, 'determine_apache_port')
- @patch.object(contexts, 'determine_api_port')
- def test_ha_proxy_context(self, determine_api_port, determine_apache_port):
- determine_api_port.return_value = contexts.CEILOMETER_PORT - 10
- determine_apache_port.return_value = contexts.CEILOMETER_PORT - 20
-
- haproxy_port = contexts.CEILOMETER_PORT
- api_port = haproxy_port - 10
- apache_port = api_port - 10
-
- expected = {
- 'service_ports': {'ceilometer_api': [haproxy_port, apache_port]},
- 'port': api_port
- }
- self.assertEquals(contexts.HAProxyContext()(), expected)
diff --git a/charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py b/charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py
deleted file mode 100644
index 1a0b1b1..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_ceilometer_hooks.py
+++ /dev/null
@@ -1,370 +0,0 @@
-import os
-import sys
-
-from mock import patch, MagicMock, call
-
-# python-apt is not installed as part of test-requirements but is imported by
-# some charmhelpers modules so create a fake import.
-mock_apt = MagicMock()
-sys.modules['apt'] = mock_apt
-mock_apt.apt_pkg = MagicMock()
-
-
-import ceilometer_utils
-# Patch out register_configs for import of hooks
-_register_configs = ceilometer_utils.register_configs
-ceilometer_utils.register_configs = MagicMock()
-
-with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
- mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
- lambda *args, **kwargs: f(*args, **kwargs))
- import ceilometer_hooks as hooks
-
-# Renable old function
-ceilometer_utils.register_configs = _register_configs
-
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'relation_get',
- 'relation_set',
- 'configure_installation_source',
- 'openstack_upgrade_available',
- 'do_openstack_upgrade',
- 'apt_install',
- 'apt_update',
- 'open_port',
- 'config',
- 'log',
- 'relation_ids',
- 'filter_installed_packages',
- 'CONFIGS',
- 'get_ceilometer_context',
- 'lsb_release',
- 'get_packages',
- 'service_restart',
- 'update_nrpe_config',
- 'peer_retrieve',
- 'peer_store',
- 'configure_https',
- 'status_set',
-]
-
-
-class CeilometerHooksTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerHooksTest, self).setUp(hooks, TO_PATCH)
- self.config.side_effect = self.test_config.get
- self.get_packages.return_value = \
- ceilometer_utils.CEILOMETER_BASE_PACKAGES
- self.filter_installed_packages.return_value = \
- ceilometer_utils.CEILOMETER_BASE_PACKAGES
- self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'}
-
- @patch('charmhelpers.payload.execd.default_execd_dir',
- return_value=os.path.join(os.getcwd(), 'exec.d'))
- @patch('charmhelpers.core.hookenv.config')
- def test_configure_source(self, mock_config, mock_execd_dir):
- self.test_config.set('openstack-origin', 'cloud:precise-havana')
- hooks.hooks.execute(['hooks/install.real'])
- self.configure_installation_source.\
- assert_called_with('cloud:precise-havana')
-
- @patch('charmhelpers.payload.execd.default_execd_dir',
- return_value=os.path.join(os.getcwd(), 'exec.d'))
- @patch('charmhelpers.core.hookenv.config')
- def test_install_hook_precise(self, mock_config, mock_execd_dir):
- hooks.hooks.execute(['hooks/install.real'])
- self.configure_installation_source.\
- assert_called_with('cloud:precise-grizzly')
- self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
- self.apt_update.assert_called_with(fatal=True)
- self.apt_install.assert_called_with(
- ceilometer_utils.CEILOMETER_BASE_PACKAGES,
- fatal=True
- )
-
- @patch('charmhelpers.payload.execd.default_execd_dir',
- return_value=os.path.join(os.getcwd(), 'exec.d'))
- @patch('charmhelpers.core.hookenv.config')
- def test_install_hook_distro(self, mock_config, mock_execd_dir):
- self.lsb_release.return_value = {'DISTRIB_CODENAME': 'saucy'}
- hooks.hooks.execute(['hooks/install.real'])
- self.configure_installation_source.\
- assert_called_with('distro')
- self.open_port.assert_called_with(hooks.CEILOMETER_PORT)
- self.apt_update.assert_called_with(fatal=True)
- self.apt_install.assert_called_with(
- ceilometer_utils.CEILOMETER_BASE_PACKAGES,
- fatal=True
- )
-
- @patch('charmhelpers.core.hookenv.config')
- def test_amqp_joined(self, mock_config):
- hooks.hooks.execute(['hooks/amqp-relation-joined'])
- self.relation_set.assert_called_with(
- username=self.test_config.get('rabbit-user'),
- vhost=self.test_config.get('rabbit-vhost'))
-
- @patch('charmhelpers.core.hookenv.config')
- def test_db_joined(self, mock_config):
- hooks.hooks.execute(['hooks/shared-db-relation-joined'])
- self.relation_set.assert_called_with(
- ceilometer_database='ceilometer')
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'ceilometer_joined')
- def test_any_changed(self, joined, mock_config):
- hooks.hooks.execute(['hooks/shared-db-relation-changed'])
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(joined.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'install')
- @patch.object(hooks, 'any_changed')
- def test_upgrade_charm(self, changed, install, mock_config):
- hooks.hooks.execute(['hooks/upgrade-charm'])
- self.assertTrue(changed.called)
- self.assertTrue(install.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'ceilometer_joined')
- def test_config_changed_no_upgrade(self, joined, mock_config):
- self.openstack_upgrade_available.return_value = False
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertFalse(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(joined.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'ceilometer_joined')
- def test_config_changed_upgrade(self, joined, mock_config):
- self.openstack_upgrade_available.return_value = True
- hooks.hooks.execute(['hooks/config-changed'])
- self.openstack_upgrade_available.\
- assert_called_with('ceilometer-common')
- self.assertTrue(self.do_openstack_upgrade.called)
- self.assertTrue(self.CONFIGS.write_all.called)
- self.assertTrue(joined.called)
-
- def test_config_changed_with_openstack_upgrade_action(self):
- self.openstack_upgrade_available.return_value = True
- self.test_config.set('action-managed-upgrade', True)
-
- hooks.hooks.execute(['hooks/config-changed'])
-
- self.assertFalse(self.do_openstack_upgrade.called)
-
- @patch.object(hooks, 'canonical_url')
- @patch('charmhelpers.core.hookenv.config')
- def test_keystone_joined(self, mock_config, _canonical_url):
- _canonical_url.return_value = "http://thishost"
- self.test_config.set('region', 'myregion')
- hooks.hooks.execute(['hooks/identity-service-relation-joined'])
- url = "http://{}:{}".format('thishost', hooks.CEILOMETER_PORT)
- self.relation_set.assert_called_with(
- service=hooks.CEILOMETER_SERVICE,
- public_url=url, admin_url=url, internal_url=url,
- requested_roles=hooks.CEILOMETER_ROLE,
- region='myregion', relation_id=None)
-
- @patch('charmhelpers.contrib.openstack.ip.service_name',
- lambda *args: 'ceilometer')
- @patch('charmhelpers.contrib.openstack.ip.unit_get')
- @patch('charmhelpers.contrib.openstack.ip.is_clustered')
- @patch('charmhelpers.core.hookenv.config')
- @patch('charmhelpers.contrib.openstack.ip.config')
- def test_keystone_joined_url_override(self, _config, mock_config,
- _is_clustered, _unit_get):
- _unit_get.return_value = "thishost"
- _is_clustered.return_value = False
- _config.side_effect = self.test_config.get
- mock_config.side_effect = self.test_config.get
- self.test_config.set('region', 'myregion')
- self.test_config.set('os-public-hostname', 'ceilometer.example.com')
- hooks.keystone_joined(None)
- url = "http://{}:{}".format('thishost', hooks.CEILOMETER_PORT)
- public_url = "http://{}:{}".format('ceilometer.example.com',
- hooks.CEILOMETER_PORT)
- self.relation_set.assert_called_with(
- service=hooks.CEILOMETER_SERVICE,
- public_url=public_url, admin_url=url, internal_url=url,
- requested_roles=hooks.CEILOMETER_ROLE,
- region='myregion', relation_id=None)
-
- @patch('charmhelpers.core.hookenv.config')
- def test_ceilometer_joined(self, mock_config):
- self.relation_ids.return_value = ['ceilometer:0']
- self.get_ceilometer_context.return_value = {'test': 'data'}
- hooks.hooks.execute(['hooks/ceilometer-service-relation-joined'])
- self.relation_set.assert_called_with('ceilometer:0',
- {'test': 'data'})
-
- @patch('charmhelpers.core.hookenv.config')
- def test_identity_notifications_changed(self, mock_config):
- self.relation_ids.return_value = ['keystone-notifications:0']
-
- self.relation_get.return_value = None
- hooks.hooks.execute(['hooks/identity-notifications-relation-changed'])
-
- self.relation_get.return_value = {('%s-endpoint-changed' %
- (hooks.CEILOMETER_SERVICE)): 1}
-
- hooks.hooks.execute(['hooks/identity-notifications-relation-changed'])
- call1 = call('ceilometer-alarm-evaluator')
- call2 = call('ceilometer-alarm-notifier')
- self.service_restart.assert_has_calls([call1, call2], any_order=False)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'install_ceilometer_ocf')
- @patch.object(hooks, 'is_elected_leader')
- def test_cluster_joined_not_leader(self, mock_leader, mock_install_ocf,
- mock_config):
- mock_leader.return_value = False
-
- hooks.hooks.execute(['hooks/cluster-relation-joined'])
- self.assertFalse(self.relation_set.called)
- self.assertTrue(self.CONFIGS.write_all.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_shared_secret')
- @patch.object(hooks, 'install_ceilometer_ocf')
- @patch.object(hooks, 'is_elected_leader')
- def test_cluster_joined_is_leader(self, mock_leader, mock_install_ocf,
- shared_secret, mock_config):
- mock_leader.return_value = True
- shared_secret.return_value = 'secret'
-
- hooks.hooks.execute(['hooks/cluster-relation-joined'])
- self.assertTrue(self.peer_store.called)
- self.peer_store.assert_called_with('shared_secret', 'secret')
- self.assertTrue(self.CONFIGS.write_all.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'set_shared_secret')
- def test_cluster_changed(self, shared_secret, mock_config):
- self.peer_retrieve.return_value = None
- hooks.hooks.execute(['hooks/cluster-relation-changed'])
- self.assertFalse(shared_secret.called)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_shared_secret')
- @patch.object(hooks, 'set_shared_secret')
- def test_cluster_changed_new_secret(self, mock_set_secret, mock_get_secret,
- mock_config):
- self.peer_retrieve.return_value = "leader_secret"
- mock_get_secret.return_value = "my_secret"
- hooks.hooks.execute(['hooks/cluster-relation-changed'])
- mock_set_secret.assert_called_with("leader_secret")
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_shared_secret')
- @patch.object(hooks, 'set_shared_secret')
- def test_cluster_changed_old_secret(self, mock_set_secret, mock_get_secret,
- mock_config):
- self.peer_retrieve.return_value = "leader_secret"
- mock_get_secret.return_value = "leader_secret"
- hooks.hooks.execute(['hooks/cluster-relation-changed'])
- self.assertEquals(mock_set_secret.call_count, 0)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_hacluster_config')
- @patch.object(hooks, 'get_iface_for_address')
- @patch.object(hooks, 'get_netmask_for_address')
- def test_ha_joined(self, mock_netmask, mock_iface, mock_cluster_config,
- mock_config):
- mock_cluster_config.return_value = {'vip': '10.0.5.100',
- 'ha-bindiface': 'bnd0',
- 'ha-mcastport': 5802}
- mock_iface.return_value = 'eth0'
- mock_netmask.return_value = '255.255.255.10'
- hooks.hooks.execute(['hooks/ha-relation-joined'])
- self.assertEquals(self.relation_set.call_count, 2)
-
- exp_resources = {
- 'res_ceilometer_haproxy': 'lsb:haproxy',
- 'res_ceilometer_agent_central': ('ocf:openstack:'
- 'ceilometer-agent-central'),
- 'res_ceilometer_eth0_vip': 'ocf:heartbeat:IPaddr2'
- }
- exp_resource_params = {
- 'res_ceilometer_haproxy': 'op monitor interval="5s"',
- 'res_ceilometer_agent_central': 'op monitor interval="30s"',
- 'res_ceilometer_eth0_vip': ('params ip="10.0.5.100" '
- 'cidr_netmask="255.255.255.10" '
- 'nic="eth0"')
- }
- exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
- call1 = call(groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
- call2 = call(init_services={'res_ceilometer_haproxy': 'haproxy'},
- corosync_bindiface='bnd0',
- corosync_mcastport=5802,
- resources=exp_resources,
- resource_params=exp_resource_params,
- clones=exp_clones)
- self.relation_set.assert_has_calls([call1, call2], any_order=False)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'get_netmask_for_address')
- @patch.object(hooks, 'get_hacluster_config')
- @patch.object(hooks, 'get_iface_for_address')
- @patch.object(hooks, 'relation_ids')
- @patch.object(hooks, 'related_units')
- @patch.object(hooks, 'relation_get')
- def test_ha_joined_ssl(self, mock_rel_get, mock_rel_units, mock_rel_ids,
- mock_iface, mock_cluster_config, mock_netmask,
- mock_config):
- mock_rel_ids.return_value = 'amqp:0'
- mock_rel_units.return_value = 'rabbitmq-server/0'
- mock_rel_get.return_value = '5671'
-
- mock_iface.return_value = 'eth0'
- mock_netmask.return_value = '255.255.255.10'
- mock_cluster_config.return_value = {'vip': '10.0.5.100',
- 'ha-bindiface': 'bnd0',
- 'ha-mcastport': 5802}
-
- hooks.hooks.execute(['hooks/ha-relation-joined'])
- self.assertEquals(self.relation_set.call_count, 2)
-
- exp_resources = {
- 'res_ceilometer_haproxy': 'lsb:haproxy',
- 'res_ceilometer_agent_central': ('ocf:openstack:'
- 'ceilometer-agent-central'),
- 'res_ceilometer_eth0_vip': 'ocf:heartbeat:IPaddr2'
- }
- exp_resource_params = {
- 'res_ceilometer_haproxy': 'op monitor interval="5s"',
- 'res_ceilometer_agent_central': ('params amqp_server_port="5671" '
- 'op monitor interval="30s"'),
- 'res_ceilometer_eth0_vip': ('params ip="10.0.5.100" '
- 'cidr_netmask="255.255.255.10" '
- 'nic="eth0"')
- }
- exp_clones = {'cl_ceilometer_haproxy': 'res_ceilometer_haproxy'}
- call1 = call(groups={'grp_ceilometer_vips': 'res_ceilometer_eth0_vip'})
- call2 = call(init_services={'res_ceilometer_haproxy': 'haproxy'},
- corosync_bindiface='bnd0',
- corosync_mcastport=5802,
- resources=exp_resources,
- resource_params=exp_resource_params,
- clones=exp_clones)
- self.relation_set.assert_has_calls([call1, call2], any_order=False)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'keystone_joined')
- def test_ha_changed_not_clustered(self, mock_keystone_joined, mock_config):
- self.relation_get.return_value = None
- hooks.hooks.execute(['hooks/ha-relation-changed'])
- self.assertEquals(mock_keystone_joined.call_count, 0)
-
- @patch('charmhelpers.core.hookenv.config')
- @patch.object(hooks, 'keystone_joined')
- def test_ha_changed_clustered(self, mock_keystone_joined, mock_config):
- self.relation_get.return_value = 'yes'
- self.relation_ids.return_value = ['identity-service/0']
- hooks.hooks.execute(['hooks/ha-relation-changed'])
- self.assertEquals(mock_keystone_joined.call_count, 1)
diff --git a/charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py b/charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py
deleted file mode 100644
index 8bfe59e..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_ceilometer_utils.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from mock import patch, call, MagicMock
-
-import ceilometer_utils as utils
-
-from test_utils import CharmTestCase
-
-TO_PATCH = [
- 'get_os_codename_package',
- 'get_os_codename_install_source',
- 'configure_installation_source',
- 'templating',
- 'LoggingConfigContext',
- 'MongoDBContext',
- 'CeilometerContext',
- 'config',
- 'log',
- 'apt_install',
- 'apt_update',
- 'apt_upgrade',
-]
-
-
-class CeilometerUtilsTest(CharmTestCase):
-
- def setUp(self):
- super(CeilometerUtilsTest, self).setUp(utils, TO_PATCH)
- self.config.side_effect = self.test_config.get
-
- def tearDown(self):
- super(CeilometerUtilsTest, self).tearDown()
-
- def test_register_configs(self):
- configs = utils.register_configs()
- calls = []
- for conf in utils.CONFIG_FILES:
- calls.append(call(conf,
- utils.CONFIG_FILES[conf]['hook_contexts']))
- configs.register.assert_has_calls(calls, any_order=True)
-
- def test_ceilometer_release_services(self):
- """Ensure that icehouse specific services are identified"""
- self.get_os_codename_install_source.return_value = 'icehouse'
- self.assertEqual(['ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'],
- utils.ceilometer_release_services())
-
- def test_ceilometer_release_services_mitaka(self):
- """Ensure that mitaka specific services are identified"""
- self.get_os_codename_install_source.return_value = 'mitaka'
- self.assertEqual(['ceilometer-agent-notification'],
- utils.ceilometer_release_services())
-
- def test_restart_map(self):
- """Ensure that alarming services are present for < OpenStack Mitaka"""
- self.get_os_codename_install_source.return_value = 'icehouse'
- restart_map = utils.restart_map()
- self.assertEquals(
- restart_map,
- {'/etc/ceilometer/ceilometer.conf': [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-alarm-notifier',
- 'ceilometer-alarm-evaluator',
- 'ceilometer-agent-notification'],
- '/etc/haproxy/haproxy.cfg': ['haproxy'],
- "/etc/apache2/sites-available/openstack_https_frontend": [
- 'apache2'],
- "/etc/apache2/sites-available/openstack_https_frontend.conf": [
- 'apache2']
- }
- )
-
- def test_restart_map_mitaka(self):
- """Ensure that alarming services are missing for OpenStack Mitaka"""
- self.get_os_codename_install_source.return_value = 'mitaka'
- restart_map = utils.restart_map()
- self.assertEquals(
- restart_map,
- {'/etc/ceilometer/ceilometer.conf': [
- 'ceilometer-agent-central',
- 'ceilometer-collector',
- 'ceilometer-api',
- 'ceilometer-agent-notification'],
- '/etc/haproxy/haproxy.cfg': ['haproxy'],
- "/etc/apache2/sites-available/openstack_https_frontend": [
- 'apache2'],
- "/etc/apache2/sites-available/openstack_https_frontend.conf": [
- 'apache2']
- }
- )
-
- def test_get_ceilometer_conf(self):
- class TestContext():
-
- def __call__(self):
- return {'data': 'test'}
- with patch.dict(utils.CONFIG_FILES,
- {'/etc/ceilometer/ceilometer.conf': {
- 'hook_contexts': [TestContext()]
- }}):
- self.assertTrue(utils.get_ceilometer_context(),
- {'data': 'test'})
-
- def test_do_openstack_upgrade(self):
- self.config.side_effect = self.test_config.get
- self.test_config.set('openstack-origin', 'cloud:trusty-kilo')
- self.get_os_codename_install_source.return_value = 'kilo'
- configs = MagicMock()
- utils.do_openstack_upgrade(configs)
- configs.set_release.assert_called_with(openstack_release='kilo')
- self.assertTrue(self.log.called)
- self.apt_update.assert_called_with(fatal=True)
- dpkg_opts = [
- '--option', 'Dpkg::Options::=--force-confnew',
- '--option', 'Dpkg::Options::=--force-confdef',
- ]
- self.apt_install.assert_called_with(
- packages=utils.CEILOMETER_BASE_PACKAGES + utils.ICEHOUSE_PACKAGES,
- options=dpkg_opts, fatal=True
- )
- self.configure_installation_source.assert_called_with(
- 'cloud:trusty-kilo'
- )
-
- def test_get_packages_icehouse(self):
- self.get_os_codename_install_source.return_value = 'icehouse'
- self.assertEqual(utils.get_packages(),
- utils.CEILOMETER_BASE_PACKAGES +
- utils.ICEHOUSE_PACKAGES)
-
- def test_get_packages_mitaka(self):
- self.get_os_codename_install_source.return_value = 'mitaka'
- self.assertEqual(utils.get_packages(),
- utils.CEILOMETER_BASE_PACKAGES +
- utils.MITAKA_PACKAGES)
-
- def test_assess_status(self):
- with patch.object(utils, 'assess_status_func') as asf:
- callee = MagicMock()
- asf.return_value = callee
- utils.assess_status('test-config')
- asf.assert_called_once_with('test-config')
- callee.assert_called_once_with()
-
- @patch.object(utils, 'REQUIRED_INTERFACES')
- @patch.object(utils, 'services')
- @patch.object(utils, 'determine_ports')
- @patch.object(utils, 'make_assess_status_func')
- def test_assess_status_func(self,
- make_assess_status_func,
- determine_ports,
- services,
- REQUIRED_INTERFACES):
- services.return_value = 's1'
- determine_ports.return_value = 'p1'
- utils.assess_status_func('test-config')
- make_assess_status_func.assert_called_once_with(
- 'test-config', REQUIRED_INTERFACES, services='s1', ports='p1')
-
- def test_pause_unit_helper(self):
- with patch.object(utils, '_pause_resume_helper') as prh:
- utils.pause_unit_helper('random-config')
- prh.assert_called_once_with(utils.pause_unit, 'random-config')
- with patch.object(utils, '_pause_resume_helper') as prh:
- utils.resume_unit_helper('random-config')
- prh.assert_called_once_with(utils.resume_unit, 'random-config')
-
- @patch.object(utils, 'services')
- @patch.object(utils, 'determine_ports')
- def test_pause_resume_helper(self, determine_ports, services):
- f = MagicMock()
- services.return_value = 's1'
- determine_ports.return_value = 'p1'
- with patch.object(utils, 'assess_status_func') as asf:
- asf.return_value = 'assessor'
- utils._pause_resume_helper(f, 'some-config')
- asf.assert_called_once_with('some-config')
- f.assert_called_once_with('assessor', services='s1', ports='p1')
diff --git a/charms/trusty/ceilometer/unit_tests/test_utils.py b/charms/trusty/ceilometer/unit_tests/test_utils.py
deleted file mode 100644
index e90679e..0000000
--- a/charms/trusty/ceilometer/unit_tests/test_utils.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import logging
-import unittest
-import os
-import yaml
-import io
-
-from contextlib import contextmanager
-from mock import patch
-
-
-@contextmanager
-def mock_open(filename, contents=None):
- ''' Slightly simpler mock of open to return contents for filename '''
- def mock_file(*args):
- if args[0] == filename:
- return io.StringIO(contents)
- else:
- return open(*args)
- with patch('__builtin__.open', mock_file):
- yield
-
-
-def load_config():
- '''
- Walk backwords from __file__ looking for config.yaml, load and return the
- 'options' section'
- '''
- config = None
- f = __file__
- while config is None:
- d = os.path.dirname(f)
- if os.path.isfile(os.path.join(d, 'config.yaml')):
- config = os.path.join(d, 'config.yaml')
- break
- f = d
-
- if not config:
- logging.error('Could not find config.yaml in any parent directory '
- 'of %s. ' % file)
- raise Exception
-
- return yaml.safe_load(open(config).read())['options']
-
-
-def get_default_config():
- '''
- Load default charm config from config.yaml return as a dict.
- If no default is set in config.yaml, its value is None.
- '''
- default_config = {}
- config = load_config()
- for k, v in config.iteritems():
- if 'default' in v:
- default_config[k] = v['default']
- else:
- default_config[k] = None
- return default_config
-
-
-class CharmTestCase(unittest.TestCase):
- def setUp(self, obj, patches):
- super(CharmTestCase, self).setUp()
- self.patches = patches
- self.obj = obj
- self.test_config = TestConfig()
- self.test_relation = TestRelation()
- self.patch_all()
-
- def patch(self, method):
- _m = patch.object(self.obj, method)
- mock = _m.start()
- self.addCleanup(_m.stop)
- return mock
-
- def patch_all(self):
- for method in self.patches:
- setattr(self, method, self.patch(method))
-
-
-class TestConfig(object):
- def __init__(self):
- self.config = get_default_config()
-
- def get(self, attr):
- try:
- return self.config[attr]
- except KeyError:
- return None
-
- def get_all(self):
- return self.config
-
- def set(self, attr, value):
- if attr not in self.config:
- raise KeyError
- self.config[attr] = value
-
-
-class TestRelation(object):
- def __init__(self, relation_data={}):
- self.relation_data = relation_data
-
- def set(self, relation_data):
- self.relation_data = relation_data
-
- def get(self, attr=None, unit=None, rid=None):
- if attr is None:
- return self.relation_data
- elif attr in self.relation_data:
- return self.relation_data[attr]
- return None
diff --git a/charms/trusty/contrail-analytics/.bzrignore b/charms/trusty/contrail-analytics/.bzrignore
deleted file mode 100644
index ba077a4..0000000
--- a/charms/trusty/contrail-analytics/.bzrignore
+++ /dev/null
@@ -1 +0,0 @@
-bin
diff --git a/charms/trusty/contrail-analytics/Makefile b/charms/trusty/contrail-analytics/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/contrail-analytics/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/contrail-analytics/README.md b/charms/trusty/contrail-analytics/README.md
deleted file mode 100644
index 0cfa491..0000000
--- a/charms/trusty/contrail-analytics/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This charm provides the analytics node component which includes
-contrail-collector, contrail-query-engine and contrail-analytics-api services.
-Only OpenStack Icehouse or newer is supported.
-
-Usage
------
-
-Cassandra and Contrail Configuration are prerequisite services to deploy.
-Once ready, deploy and relate as follows:
-
- juju deploy contrail-analytics
- juju add-relation contrail-analytics:cassandra cassandra:database
- juju add-relation contrail-analytics contrail-configuration
-
-Install Sources
----------------
-
-The version of OpenContrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
-
-The version of dependent OpenStack components installed when deploying can be
-changed using the 'openstack-origin' option. When deploying to different
-OpenStack versions, openstack-origin needs to be set across all OpenStack and
-OpenContrail charms where available.
-
-High Availability (HA)
-----------------------
-
-Multiple units of this charm can be deployed to support HA deployments:
-
- juju add-unit contrail-analytics
-
-Relating to haproxy charm (http-services relation) allows multiple units to be
-load balanced:
-
- juju add-relation contrail-analytics haproxy
diff --git a/charms/trusty/contrail-analytics/charm-helpers-sync.yaml b/charms/trusty/contrail-analytics/charm-helpers-sync.yaml
deleted file mode 100644
index eadff82..0000000
--- a/charms/trusty/contrail-analytics/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
- - contrib.network
- - contrib.openstack|inc=*
- - contrib.python
- - contrib.storage
diff --git a/charms/trusty/contrail-analytics/config.yaml b/charms/trusty/contrail-analytics/config.yaml
deleted file mode 100644
index b92d33e..0000000
--- a/charms/trusty/contrail-analytics/config.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-options:
- install-sources:
- type: string
- default: |
- - "ppa:opencontrail/ppa"
- - "ppa:opencontrail/r2.20"
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
- openstack-origin:
- type: string
- default: distro
- description: |
- Repository from which to install. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported Cloud Archive release pocket.
-
- Supported Cloud Archive sources include: cloud:precise-folsom,
- cloud:precise-folsom/updates, cloud:precise-folsom/staging,
- cloud:precise-folsom/proposed.
- vip:
- type: string
- description: |
- Virtual IP address to use when services are related in a High Availability
- configuration.
- cassandra-units:
- type: int
- default: 1
- description: Minimum number of units required in cassandra relation
- kafka-units:
- type: int
- default: 1
- description: Minimum number of units required in kafka relation
diff --git a/charms/trusty/contrail-analytics/copyright b/charms/trusty/contrail-analytics/copyright
deleted file mode 100644
index 567db82..0000000
--- a/charms/trusty/contrail-analytics/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/files/contrail b/charms/trusty/contrail-analytics/files/contrail
deleted file mode 100644
index 39e20cc..0000000
--- a/charms/trusty/contrail-analytics/files/contrail
+++ /dev/null
@@ -1,5 +0,0 @@
-Explanation: Use contrail version of the package.
-Package: python-redis
-Pin: version /contrail/
-Pin-Priority: 1001
-
diff --git a/charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini b/charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini
deleted file mode 100644
index aa3ab8a..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-alarm-gen.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[program:contrail-alarm-gen]
-command=/usr/bin/contrail-alarm-gen --conf_file /etc/contrail/contrail-keystone-auth.conf --conf_file /etc/contrail/contrail-alarm-gen.conf
-priority=440
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-alarm-gen-stdout.log
-stderr_logfile=/var/log/contrail/contrail-alarm-gen-stderr.log
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
-user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-analytics-api.ini b/charms/trusty/contrail-analytics/files/contrail-analytics-api.ini
deleted file mode 100644
index 3f76d94..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-analytics-api.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[program:contrail-analytics-api]
-command=/usr/bin/contrail-analytics-api --conf_file /etc/contrail/contrail-keystone-auth.conf --conf_file /etc/contrail/contrail-analytics-api.conf
-priority=440
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-analytics-api-stdout.log
-stderr_logfile=/var/log/contrail/contrail-analytics-api-stderr.log
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
-user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr b/charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr
deleted file mode 100644
index a88aafa..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-analytics-nodemgr
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network Analytics Node Manager
-
-supervisorctl -s unix:///tmp/supervisord_analytics.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-analytics/files/contrail-collector.ini b/charms/trusty/contrail-analytics/files/contrail-collector.ini
deleted file mode 100644
index 17a5617..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-collector.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[program:contrail-collector]
-command=/usr/bin/contrail-collector --conf_file /etc/contrail/contrail-keystone-auth.conf --conf_file /etc/contrail/contrail-collector.conf
-priority=420
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-collector-stdout.log
-stderr_logfile=/dev/null
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
-user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini b/charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini
deleted file mode 100644
index 5aa2873..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-nodemgr-analytics.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[eventlistener:contrail-analytics-nodemgr]
-command=/bin/bash -c "exec /usr/bin/contrail-nodemgr"
-events=PROCESS_COMMUNICATION,PROCESS_STATE,TICK_60
-buffer_size=10000
-stdout_logfile=/var/log/contrail/contrail-analytics-nodemgr-stdout.log
-stderr_logfile=/var/log/contrail/contrail-analytics-nodemgr-stderr.log
diff --git a/charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini b/charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini
deleted file mode 100644
index 5f28ac5..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-snmp-collector.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[program:contrail-snmp-collector]
-command=/usr/bin/contrail-snmp-collector --conf_file /etc/contrail/contrail-snmp-collector.conf --conf_file /etc/contrail/contrail-keystone-auth.conf
-priority=340
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-snmp-collector-stdout.log
-stderr_logfile=/var/log/contrail/contrail-snmp-collector-stderr.log
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
-user=contrail
diff --git a/charms/trusty/contrail-analytics/files/contrail-topology.ini b/charms/trusty/contrail-analytics/files/contrail-topology.ini
deleted file mode 100644
index bce6a0b..0000000
--- a/charms/trusty/contrail-analytics/files/contrail-topology.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-[program:contrail-topology]
-command=/usr/bin/contrail-topology --conf_file /etc/contrail/contrail-topology.conf --conf_file /etc/contrail/contrail-keystone-auth.conf
-priority=340
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-topology-stdout.log
-stderr_logfile=/var/log/contrail/contrail-topology-stderr.log
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
-user=contrail
diff --git a/charms/trusty/contrail-analytics/files/ntpq-nodemgr b/charms/trusty/contrail-analytics/files/ntpq-nodemgr
deleted file mode 100755
index da00247..0000000
--- a/charms/trusty/contrail-analytics/files/ntpq-nodemgr
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-#
-# Script to produce some dummy output to satisfy contrail-nodemgr ntp status
-# Note: This is intended to be deployed inside containers where the host is running ntp
-
-if [ -x /usr/bin/ntpq ]; then
- exec /usr/bin/ntpq "$@"
-fi
-
-echo "*"
diff --git a/charms/trusty/contrail-analytics/hooks/cassandra-relation-broken b/charms/trusty/contrail-analytics/hooks/cassandra-relation-broken
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/cassandra-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/cassandra-relation-changed b/charms/trusty/contrail-analytics/hooks/cassandra-relation-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/cassandra-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/cassandra-relation-departed b/charms/trusty/contrail-analytics/hooks/cassandra-relation-departed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/cassandra-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index 7f3b66b..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network: %s" % network)
- raise ValueError(errmsg)
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
-
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any."""
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4 address
- socket.inet_aton(address)
- return True
- except socket.error:
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- apt_install('python-dnspython')
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- answers = dns.resolver.query(address, rtype)
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- apt_install("python-dnspython")
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py
deleted file mode 100644
index 77e2db7..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helpers for interacting with OpenvSwitch '''
-import subprocess
-import os
-from charmhelpers.core.hookenv import (
- log, WARNING
-)
-from charmhelpers.core.host import (
- service
-)
-
-
-def add_bridge(name):
- ''' Add the named bridge to openvswitch '''
- log('Creating bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
-
-
-def del_bridge(name):
- ''' Delete the named bridge from openvswitch '''
- log('Deleting bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
-
-
-def add_bridge_port(name, port, promisc=False):
- ''' Add a port to the named openvswitch bridge '''
- log('Adding port {} to bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "up"])
- if promisc:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
- else:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def del_bridge_port(name, port):
- ''' Delete a port from the named openvswitch bridge '''
- log('Deleting port {} from bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "down"])
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def set_manager(manager):
- ''' Set the controller for the local openvswitch '''
- log('Setting manager for local ovs to {}'.format(manager))
- subprocess.check_call(['ovs-vsctl', 'set-manager',
- 'ssl:{}'.format(manager)])
-
-
-CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
-
-
-def get_certificate():
- ''' Read openvswitch certificate from disk '''
- if os.path.exists(CERT_PATH):
- log('Reading ovs certificate from {}'.format(CERT_PATH))
- with open(CERT_PATH, 'r') as cert:
- full_cert = cert.read()
- begin_marker = "-----BEGIN CERTIFICATE-----"
- end_marker = "-----END CERTIFICATE-----"
- begin_index = full_cert.find(begin_marker)
- end_index = full_cert.rfind(end_marker)
- if end_index == -1 or begin_index == -1:
- raise RuntimeError("Certificate does not contain valid begin"
- " and end markers.")
- full_cert = full_cert[begin_index:(end_index + len(end_marker))]
- return full_cert
- else:
- log('Certificate not found', level=WARNING)
- return None
-
-
-def full_restart():
- ''' Full restart and reload of openvswitch '''
- if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
- service('start', 'openvswitch-force-reload-kmod')
- else:
- service('force-reload-kmod', 'openvswitch-switch')
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py
deleted file mode 100644
index b65d963..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/network/ufw.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module contains helpers to add and remove ufw rules.
-
-Examples:
-
-- open SSH port for subnet 10.0.3.0/24:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
-
-- open service by name as defined in /etc/services:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('ssh', 'open')
-
-- close service by port number:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('4949', 'close') # munin
-"""
-import re
-import os
-import subprocess
-
-from charmhelpers.core import hookenv
-from charmhelpers.core.kernel import modprobe, is_module_loaded
-
-__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
-
-
-class UFWError(Exception):
- pass
-
-
-class UFWIPv6Error(UFWError):
- pass
-
-
-def is_enabled():
- """
- Check if `ufw` is enabled
-
- :returns: True if ufw is enabled
- """
- output = subprocess.check_output(['ufw', 'status'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Status: active\n', output, re.M)
-
- return len(m) >= 1
-
-
-def is_ipv6_ok(soft_fail=False):
- """
- Check if IPv6 support is present and ip6tables functional
-
- :param soft_fail: If set to True and IPv6 support is broken, then reports
- that the host doesn't have IPv6 support, otherwise a
- UFWIPv6Error exception is raised.
- :returns: True if IPv6 is working, False otherwise
- """
-
- # do we have IPv6 in the machine?
- if os.path.isdir('/proc/sys/net/ipv6'):
- # is ip6tables kernel module loaded?
- if not is_module_loaded('ip6_tables'):
- # ip6tables support isn't complete, let's try to load it
- try:
- modprobe('ip6_tables')
- # great, we can load the module
- return True
- except subprocess.CalledProcessError as ex:
- hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
- level="WARN")
- # we are in a world where ip6tables isn't working
- if soft_fail:
- # so we inform that the machine doesn't have IPv6
- return False
- else:
- raise UFWIPv6Error("IPv6 firewall support broken")
- else:
- # the module is present :)
- return True
-
- else:
- # the system doesn't have IPv6
- return False
-
-
-def disable_ipv6():
- """
- Disable ufw IPv6 support in /etc/default/ufw
- """
- exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
- '/etc/default/ufw'])
- if exit_code == 0:
- hookenv.log('IPv6 support in ufw disabled', level='INFO')
- else:
- hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
- raise UFWError("Couldn't disable IPv6 support in ufw")
-
-
-def enable(soft_fail=False):
- """
- Enable ufw
-
- :param soft_fail: If set to True silently disables IPv6 support in ufw,
- otherwise a UFWIPv6Error exception is raised when IP6
- support is broken.
- :returns: True if ufw is successfully enabled
- """
- if is_enabled():
- return True
-
- if not is_ipv6_ok(soft_fail):
- disable_ipv6()
-
- output = subprocess.check_output(['ufw', 'enable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall('^Firewall is active and enabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be enabled", level='WARN')
- return False
- else:
- hookenv.log("ufw enabled", level='INFO')
- return True
-
-
-def disable():
- """
- Disable ufw
-
- :returns: True if ufw is successfully disabled
- """
- if not is_enabled():
- return True
-
- output = subprocess.check_output(['ufw', 'disable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Firewall stopped and disabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be disabled", level='WARN')
- return False
- else:
- hookenv.log("ufw disabled", level='INFO')
- return True
-
-
-def default_policy(policy='deny', direction='incoming'):
- """
- Changes the default policy for traffic `direction`
-
- :param policy: allow, deny or reject
- :param direction: traffic direction, possible values: incoming, outgoing,
- routed
- """
- if policy not in ['allow', 'deny', 'reject']:
- raise UFWError(('Unknown policy %s, valid values: '
- 'allow, deny, reject') % policy)
-
- if direction not in ['incoming', 'outgoing', 'routed']:
- raise UFWError(('Unknown direction %s, valid values: '
- 'incoming, outgoing, routed') % direction)
-
- output = subprocess.check_output(['ufw', 'default', policy, direction],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
- hookenv.log(output, level='DEBUG')
-
- m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
- policy),
- output, re.M)
- if len(m) == 0:
- hookenv.log("ufw couldn't change the default policy to %s for %s"
- % (policy, direction), level='WARN')
- return False
- else:
- hookenv.log("ufw default policy for %s changed to %s"
- % (direction, policy), level='INFO')
- return True
-
-
-def modify_access(src, dst='any', port=None, proto=None, action='allow',
- index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param action: `allow` or `delete`
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- if not is_enabled():
- hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
- return
-
- if action == 'delete':
- cmd = ['ufw', 'delete', 'allow']
- elif index is not None:
- cmd = ['ufw', 'insert', str(index), action]
- else:
- cmd = ['ufw', action]
-
- if src is not None:
- cmd += ['from', src]
-
- if dst is not None:
- cmd += ['to', dst]
-
- if port is not None:
- cmd += ['port', str(port)]
-
- if proto is not None:
- cmd += ['proto', proto]
-
- hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
-
- hookenv.log(stdout, level='INFO')
-
- if p.returncode != 0:
- hookenv.log(stderr, level='ERROR')
- hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
- p.returncode),
- level='ERROR')
-
-
-def grant_access(src, dst='any', port=None, proto=None, index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
- index=index)
-
-
-def revoke_access(src, dst='any', port=None, proto=None):
- """
- Revoke access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
-
-
-def service(name, action):
- """
- Open/close access to a service
-
- :param name: could be a service name defined in `/etc/services` or a port
- number.
- :param action: `open` or `close`
- """
- if action == 'open':
- subprocess.check_output(['ufw', 'allow', str(name)],
- universal_newlines=True)
- elif action == 'close':
- subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
- universal_newlines=True)
- else:
- raise UFWError(("'{}' not supported, use 'allow' "
- "or 'delete'").format(action))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index ef77caf..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 722bc64..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None, stable=True):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty) = range(12)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index b139741..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,963 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-import novaclient.v1_1.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'tenantId': act.tenantId,
- 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- service_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
-# rabbitmq/amqp specific helpers:
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not port and not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.configure('rabbitmq-server', config)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.configure('rabbitmq-server', config)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index 1248d49..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1416 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call
-
-import six
-import yaml
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import get_host_ip
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-class OSContextError(Exception):
- pass
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1',
- 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1', 'value1',
- 'key2', 'value2,value3,value4'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1', 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a list of comma-separated values
- which themselves may be comma-separated list of values.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return yaml.safe_load(config_flags)
-
- if config_flags.find('==') >= 0:
- log("config_flags is not in expected format (key=value)", level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = {}
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- log("Invalid config value(s) at index %s" % (i), level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- unit_priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- ceph_addr = ceph_pub_addr or unit_priv_addr
- ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
- mon_hosts.append(ceph_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False):
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = {}
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in ADDRESS_TYPES:
- cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
- # match in the frontend
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('prefer-ipv6'):
- ctxt['ipv6'] = True
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- ctxt['stat_port'] = ':::8888'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
- ctxt['stat_port'] = ':8888'
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of a tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert))
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key))
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- cns.append(k.lstrip('ssl_key_'))
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and vip
- (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- if config('vip'):
- vips = config('vip').split()
- else:
- vips = []
-
- for net_type in ['os-internal-network', 'os-admin-network',
- 'os-public-network']:
- addr = get_address_in_network(config(net_type),
- unit_get('private-address'))
- if len(vips) > 1 and is_clustered():
- if not config(net_type):
- log("Multiple networks configured but net_type "
- "is None (%s)." % net_type, level=WARNING)
- continue
-
- for vip in vips:
- if is_address_in_network(config(net_type), vip):
- addresses.append((addr, vip))
- break
-
- elif is_clustered() and config('vip'):
- addresses.append((addr, config('vip')))
- else:
- addresses.append((addr, addr))
-
- return sorted(addresses)
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- self.configure_ca()
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in sorted(set(addresses)):
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- self._save_flag_file()
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT hace an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-seperated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some Openstack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principle for multiple config
- files and multiple serivces. Ie, a subordinate that has interfaces
- to both glance and nova may export to following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principle charms to subscribe this context to
- the service+config file it is interestd in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_config': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except:
- log('Could not parse JSON from subordinate_config '
- 'setting from %s' % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_config on %s but it contained'
- 'nothing for %s service' % (rid, service),
- level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_config on %s but it contained'
- 'nothing for %s' % (rid, self.config_file),
- level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- @property
- def num_cpus(self):
- try:
- from psutil import NUM_CPUS
- except ImportError:
- apt_install('python-psutil', fatal=True)
- from psutil import NUM_CPUS
-
- return NUM_CPUS
-
- def __call__(self):
- multiplier = config('worker-multiplier') or 0
- ctxt = {"workers": self.num_cpus * multiplier}
- return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects current neutron-plugin-api relation for neutron settings. Return
- defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {bridge: normalized[port] for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.values():
- ports = mappings.values()
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt["devs"] = '\\n'.join(ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
deleted file mode 100755
index eb8527f..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-CRITICAL=0
-NOTACTIVE=''
-LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
-do
- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
- if [ $? != 0 ]; then
- date >> $LOGFILE
- echo $output >> $LOGFILE
- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
- CRITICAL=1
- NOTACTIVE="${NOTACTIVE} $appserver"
- fi
-done
-
-if [ $CRITICAL = 1 ]; then
- echo "CRITICAL:${NOTACTIVE}"
- exit 2
-fi
-
-echo "OK: All haproxy instances looking good"
-exit 0
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
deleted file mode 100755
index 3ebb532..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-# These should be config options at some stage
-CURRQthrsh=0
-MAXQthrsh=100
-
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
-
-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
-do
- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
-
- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
- exit 2
- fi
-done
-
-echo "OK: All haproxy queue depths looking good"
-exit 0
-
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 3dca6dc..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured.
-
- :param endpoint_type: Network endpoing type
- """
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- clustered = is_clustered()
- if clustered:
- if not net_addr:
- # If no net-splits defined, we expect a single vip
- resolved_address = vips[0]
- else:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- resolved_address = get_address_in_network(net_addr, fallback_addr)
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 55b2037..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,356 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return ['openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [['plumgrid-lxc'],
- ['iovisor-dkms']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of port:bridge mappings.
-
- Returns dict of the form {port:bridge} where port may be an mac address or
- interface name.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index b99851c..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[global]
-{% if auth -%}
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
-
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart
deleted file mode 100644
index 4bed404..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/git.upstart
+++ /dev/null
@@ -1,17 +0,0 @@
-description "{{ service_description }}"
-author "Juju {{ service_name }} Charm <juju@localhost>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid {{ user_name }} \
- --chdir {{ start_dir }} --name {{ process_name }} \
- --exec {{ executable_name }} -- \
- {% for config_file in config_files -%}
- --config-file={{ config_file }} \
- {% endfor -%}
- {% if log_file -%}
- --log-file={{ log_file }}
- {% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
deleted file mode 100644
index ad875f1..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ /dev/null
@@ -1,58 +0,0 @@
-global
- log {{ local_host }} local0
- log {{ local_host }} local1 notice
- maxconn 20000
- user haproxy
- group haproxy
- spread-checks 0
-
-defaults
- log global
- mode tcp
- option tcplog
- option dontlognull
- retries 3
- timeout queue 1000
- timeout connect 1000
-{% if haproxy_client_timeout -%}
- timeout client {{ haproxy_client_timeout }}
-{% else -%}
- timeout client 30000
-{% endif -%}
-
-{% if haproxy_server_timeout -%}
- timeout server {{ haproxy_server_timeout }}
-{% else -%}
- timeout server 30000
-{% endif -%}
-
-listen stats {{ stat_port }}
- mode http
- stats enable
- stats hide-version
- stats realm Haproxy\ Statistics
- stats uri /
- stats auth admin:password
-
-{% if frontends -%}
-{% for service, ports in service_ports.items() -%}
-frontend tcp-in_{{ service }}
- bind *:{{ ports[0] }}
- {% if ipv6 -%}
- bind :::{{ ports[0] }}
- {% endif -%}
- {% for frontend in frontends -%}
- acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
- use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
- {% endfor -%}
- default_backend {{ service }}_{{ default_backend }}
-
-{% for frontend in frontends -%}
-backend {{ service }}_{{ frontend }}
- balance leastconn
- {% for unit, address in frontends[frontend]['backends'].items() -%}
- server {{ unit }} {{ address }}:{{ ports[1] }} check
- {% endfor %}
-{% endfor -%}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
deleted file mode 100644
index ce28fa3..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
+++ /dev/null
@@ -1,24 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
deleted file mode 100644
index ce28fa3..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
deleted file mode 100644
index 2a37edd..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
deleted file mode 100644
index b444c9c..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-[oslo_messaging_rabbit]
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
deleted file mode 100644
index 95f1a76..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
+++ /dev/null
@@ -1,14 +0,0 @@
-{% if zmq_host -%}
-# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
-rpc_backend = zmq
-rpc_zmq_host = {{ zmq_host }}
-{% if zmq_redis_address -%}
-rpc_zmq_matchmaker = redis
-matchmaker_heartbeat_freq = 15
-matchmaker_heartbeat_ttl = 30
-[matchmaker_redis]
-host = {{ zmq_redis_address }}
-{% else -%}
-rpc_zmq_matchmaker = ring
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index e5e3cb1..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 24b998d..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,926 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-
-import six
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- config,
- log as juju_log,
- charm_dir,
- INFO,
- relation_ids,
- relation_set,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
-])
-
-# The ugly duckling
-SWIFT_CODENAMES = OrderedDict([
- ('1.4.3', 'diablo'),
- ('1.4.8', 'essex'),
- ('1.7.4', 'folsom'),
- ('1.8.0', 'grizzly'),
- ('1.7.7', 'grizzly'),
- ('1.7.6', 'grizzly'),
- ('1.10.0', 'havana'),
- ('1.9.1', 'havana'),
- ('1.9.0', 'havana'),
- ('1.13.1', 'icehouse'),
- ('1.13.0', 'icehouse'),
- ('1.12.0', 'icehouse'),
- ('1.11.0', 'icehouse'),
- ('2.0.0', 'juno'),
- ('2.1.0', 'juno'),
- ('2.2.0', 'juno'),
- ('2.2.1', 'kilo'),
- ('2.2.2', 'kilo'),
- ('2.3.0', 'liberty'),
- ('2.4.0', 'liberty'),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12.0.0', 'liberty'),
- ]),
- 'neutron-common': OrderedDict([
- ('7.0.0', 'liberty'),
- ]),
- 'cinder-common': OrderedDict([
- ('7.0.0', 'liberty'),
- ]),
- 'keystone': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
- 'horizon-common': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5.0.0', 'liberty'),
- ]),
- 'heat-common': OrderedDict([
- ('5.0.0', 'liberty'),
- ]),
- 'glance-common': OrderedDict([
- ('11.0.0', 'liberty'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- if match:
- vers = match.group(0)
-
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- swift_vers = vers[:5]
- if swift_vers not in SWIFT_CODENAMES:
- # Deal with 1.10.0 upward
- swift_vers = vers[:6]
- return SWIFT_CODENAMES[swift_vers]
- else:
- vers = vers[:6]
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- else:
- vers_map = OPENSTACK_CODENAMES
-
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
-def import_key(keyid):
- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
- "--recv-keys %s" % keyid
- try:
- subprocess.check_call(cmd.split(' '))
- except subprocess.CalledProcessError:
- error_out("Error importing repo key %s" % keyid)
-
-
-def configure_installation_source(rel):
- '''Configure apt installation source.'''
- if rel == 'distro':
- return
- elif rel == 'distro-proposed':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(DISTRO_PROPOSED % ubuntu_rel)
- elif rel[:4] == "ppa:":
- src = rel
- subprocess.check_call(["add-apt-repository", "-y", src])
- elif rel[:3] == "deb":
- l = len(rel.split('|'))
- if l == 2:
- src, key = rel.split('|')
- juju_log("Importing PPA key from keyserver for %s" % src)
- import_key(key)
- elif l == 1:
- src = rel
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(src)
- elif rel[:6] == 'cloud:':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = rel.split(':')[1]
- u_rel = rel.split('-')[0]
- ca_rel = rel.split('-')[1]
-
- if u_rel != ubuntu_rel:
- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
- 'version (%s)' % (ca_rel, ubuntu_rel)
- error_out(e)
-
- if 'staging' in ca_rel:
- # staging is just a regular PPA.
- os_rel = ca_rel.split('/')[0]
- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
- cmd = 'add-apt-repository -y %s' % ppa
- subprocess.check_call(cmd.split(' '))
- return
-
- # map charm config options to actual archive pockets.
- pockets = {
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- }
-
- try:
- pocket = pockets[ca_rel]
- except KeyError:
- e = 'Invalid Cloud Archive release specified: %s' % rel
- error_out(e)
-
- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
- apt_install('ubuntu-cloud-keyring', fatal=True)
-
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
- f.write(src)
- else:
- error_out("Invalid openstack-release specified: %s" % rel)
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- [rc_script.write('export %s=%s\n' % (u, p))
- for u, p in six.iteritems(env_vars) if u != "script_path"]
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: : Returns True if configured installation source offers
- a newer version of package.
-
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
- else:
- available_vers = get_os_version_install_source(src)
- apt.init()
- return apt.version_compare(available_vers, cur_vers) == 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('prepare_storage(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
-
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-requirements_dir = None
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-def git_clone_and_install(projects_yaml, core_project, depth=1):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements):
- """
- Clone and install a single git repository.
- """
- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
-
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- if not os.path.exists(dest_dir):
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
- depth=depth)
- else:
- repo_dir = dest_dir
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv)
- else:
- pip_install(repo_dir, venv=venv)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Set workload status based on complete contexts.
- status-set missing or incomplete contexts
- and juju-log details of missing required data.
- charm_func is a charm specific function to run checking
- for charm specific requirements such as a VIP setting.
- """
- incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
- state = 'active'
- missing_relations = []
- incomplete_relations = []
- message = None
- charm_state = None
- charm_message = None
-
- for generic_interface in incomplete_rel_data.keys():
- related_interface = None
- missing_data = {}
- # Related or not?
- for interface in incomplete_rel_data[generic_interface]:
- if incomplete_rel_data[generic_interface][interface].get('related'):
- related_interface = interface
- missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
- # No relation ID for the generic_interface
- if not related_interface:
- juju_log("{} relation is missing and must be related for "
- "functionality. ".format(generic_interface), 'WARN')
- state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
- else:
- # Relation ID exists but no related unit
- if not missing_data:
- # Edge case relation ID exists but departing
- if ('departed' in hook_name() or 'broken' in hook_name()) \
- and related_interface in hook_name():
- state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
- juju_log("{} relation's interface, {}, "
- "relationship is departed or broken "
- "and is required for functionality."
- "".format(generic_interface, related_interface), "WARN")
- # Normal case relation ID exists but no related unit
- # (joining)
- else:
- juju_log("{} relations's interface, {}, is related but has "
- "no units in the relation."
- "".format(generic_interface, related_interface), "INFO")
- # Related unit exists and data missing on the relation
- else:
- juju_log("{} relation's interface, {}, is related awaiting "
- "the following data from the relationship: {}. "
- "".format(generic_interface, related_interface,
- ", ".join(missing_data)), "INFO")
- if state != 'blocked':
- state = 'waiting'
- if generic_interface not in incomplete_relations \
- and generic_interface not in missing_relations:
- incomplete_relations.append(generic_interface)
-
- if missing_relations:
- message = "Missing relations: {}".format(", ".join(missing_relations))
- if incomplete_relations:
- message += "; incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'blocked'
- elif incomplete_relations:
- message = "Incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'waiting'
-
- # Run charm specific checks
- if charm_func:
- charm_state, charm_message = charm_func(configs)
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- message = "{} {}".format(message, charm_message)
- else:
- message = charm_message
-
- # Set to active if all requirements have been met
- if state == 'active':
- message = "Unit is ready"
- juju_log(message, "INFO")
-
- status_set(state, message)
-
-
-def workload_state_compare(current_workload_state, workload_state):
- """ Return highest priority of two states"""
- hierarchy = {'unknown': -1,
- 'active': 0,
- 'maintenance': 1,
- 'waiting': 2,
- 'blocked': 3,
- }
-
- if hierarchy.get(workload_state) is None:
- workload_state = 'unknown'
- if hierarchy.get(current_workload_state) is None:
- current_workload_state = 'unknown'
-
- # Set workload_state based on hierarchy of statuses
- if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
- return current_workload_state
- else:
- return workload_state
-
-
-def incomplete_relation_data(configs, required_interfaces):
- """
- Check complete contexts against required_interfaces
- Return dictionary of incomplete relation data.
-
- configs is an OSConfigRenderer object with configs registered
-
- required_interfaces is a dictionary of required general interfaces
- with dictionary values of possible specific interfaces.
- Example:
- required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
- The interface is said to be satisfied if anyone of the interfaces in the
- list has a complete context.
-
- Return dictionary of incomplete or missing required contexts with relation
- status of interfaces and any missing data points. Example:
- {'message':
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}},
- 'identity':
- {'identity-service': {'related': False}},
- 'database':
- {'pgsql-db': {'related': False},
- 'shared-db': {'related': True}}}
- """
- complete_ctxts = configs.complete_contexts()
- incomplete_relations = []
- for svc_type in required_interfaces.keys():
- # Avoid duplicates
- found_ctxt = False
- for interface in required_interfaces[svc_type]:
- if interface in complete_ctxts:
- found_ctxt = True
- if not found_ctxt:
- incomplete_relations.append(svc_type)
- incomplete_context_data = {}
- for i in incomplete_relations:
- incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
- return incomplete_context_data
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py
deleted file mode 100644
index 871cd6f..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/debug.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import atexit
-import sys
-
-from charmhelpers.contrib.python.rpdb import Rpdb
-from charmhelpers.core.hookenv import (
- open_port,
- close_port,
- ERROR,
- log
-)
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-DEFAULT_ADDR = "0.0.0.0"
-DEFAULT_PORT = 4444
-
-
-def _error(message):
- log(message, level=ERROR)
-
-
-def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
- """
- Set a trace point using the remote debugger
- """
- atexit.register(close_port, port)
- try:
- log("Starting a remote python debugger session on %s:%s" % (addr,
- port))
- open_port(port)
- debugger = Rpdb(addr=addr, port=port)
- debugger.set_trace(sys._getframe().f_back)
- except:
- _error("Cannot start a remote debug session on %s:%s" % (addr,
- port))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index 10b32e3..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-try:
- from pip import main as pip_execute
-except ImportError:
- apt_update()
- apt_install('python-pip')
- from pip import main as pip_execute
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, **options):
- """Install a requirements file """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- apt_install('python-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py
deleted file mode 100644
index d503f88..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/rpdb.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Remote Python Debugger (pdb wrapper)."""
-
-import pdb
-import socket
-import sys
-
-__author__ = "Bertrand Janin <b@janin.com>"
-__version__ = "0.1.3"
-
-
-class Rpdb(pdb.Pdb):
-
- def __init__(self, addr="127.0.0.1", port=4444):
- """Initialize the socket and initialize pdb."""
-
- # Backup stdin and stdout before replacing them by the socket handle
- self.old_stdout = sys.stdout
- self.old_stdin = sys.stdin
-
- # Open a 'reusable' socket to let the webapp reload on the same port
- self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
- self.skt.bind((addr, port))
- self.skt.listen(1)
- (clientsocket, address) = self.skt.accept()
- handle = clientsocket.makefile('rw')
- pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
- sys.stdout = sys.stdin = handle
-
- def shutdown(self):
- """Revert stdin and stdout, close the socket."""
- sys.stdout = self.old_stdout
- sys.stdin = self.old_stdin
- self.skt.close()
- self.set_continue()
-
- def do_continue(self, arg):
- """Stop all operation on ``continue``."""
- self.shutdown()
- return 1
-
- do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py
deleted file mode 100644
index c39fcbf..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/python/version.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def current_version():
- """Current system python version"""
- return sys.version_info
-
-
-def current_version_string():
- """Current system python version as string major.minor.micro"""
- return "{0}.{1}.{2}".format(sys.version_info.major,
- sys.version_info.minor,
- sys.version_info.micro)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index 83f264d..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,657 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-
-from charmhelpers.core.kernel import modprobe
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def get_osds(service):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
- """
- version = ceph_version()
- if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
-
- return None
-
-
-def create_pool(service, name, replicas=3):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pgnum = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pgnum = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
- check_call(cmd)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
- str(replicas)]
- check_call(cmd)
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted ring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes():
- """Query named relation 'ceph' to determine current nodes."""
- hosts = []
- for r_id in relation_ids('ceph'):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- create_keyring(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = os.errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
- blk_device, fstype, system_services=[],
- replicas=3):
- """NOTE: This function must only be called from a single service unit for
- the same rbd_img otherwise data loss will occur.
-
- Ensures given pool and RBD image exists, is mapped to a block device,
- and the device is formatted and mounted at the given mount_point.
-
- If formatting a device for the first time, data existing at mount_point
- will be migrated to the RBD device before being re-mounted.
-
- All services listed in system_services will be stopped prior to data
- migration and restarted when complete.
- """
- # Ensure pool, RBD image, RBD mappings are in place.
- if not pool_exists(service, pool):
- log('Creating new pool {}.'.format(pool), level=INFO)
- create_pool(service, pool, replicas=replicas)
-
- if not rbd_exists(service, pool, rbd_img):
- log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
- create_rbd_image(service, pool, rbd_img, sizemb)
-
- if not image_mapped(rbd_img):
- log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
- level=INFO)
- map_block_storage(service, pool, rbd_img)
-
- # make file system
- # TODO: What happens if for whatever reason this is run again and
- # the data is already in the rbd device and/or is mounted??
- # When it is mounted already, it will fail to make the fs
- # XXX: This is really sketchy! Need to at least add an fstab entry
- # otherwise this hook will blow away existing data if its executed
- # after a reboot.
- if not filesystem_mounted(mount_point):
- make_filesystem(blk_device, fstype)
-
- for svc in system_services:
- if service_running(svc):
- log('Stopping services {} prior to migrating data.'
- .format(svc), level=DEBUG)
- service_stop(svc)
-
- place_data_on_block_device(blk_device, mount_point)
-
- for svc in system_services:
- log('Starting service {} after migrating data.'
- .format(svc), level=DEBUG)
- service_start(svc)
-
-
-def ensure_ceph_keyring(service, user=None, group=None):
- """Ensures a ceph keyring is created for a named service and optionally
- ensures user and group ownership.
-
- Returns False if no ceph key is available in relation state.
- """
- key = None
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
-
- if not key:
- return False
-
- create_keyring(service=service, key=key)
- keyring = _keyring_path(service)
- if user and group:
- check_call(['chown', '%s.%s' % (user, group), keyring])
-
- return True
-
-
-def ceph_version():
- """Retrieve the local version of ceph."""
- if os.path.exists('/usr/bin/ceph'):
- cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
- output = output.split()
- if len(output) > 3:
- return output[2]
- else:
- return None
- else:
- return None
-
-
-class CephBrokerRq(object):
- """Ceph broker request.
-
- Multiple operations can be added to a request and sent to the Ceph broker
- to be executed.
-
- Request is json-encoded for sending over the wire.
-
- The API is versioned and defaults to version 1.
- """
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
- else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
-
- def add_op_create_pool(self, name, replica_count=3):
- self.ops.append({'op': 'create-pool', 'name': name,
- 'replicas': replica_count})
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op']:
- if self.ops[req_no][key] != other.ops[req_no][key]:
- return False
- else:
- return False
- return True
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.api_version == other.api_version and \
- self._ops_equal(other):
- return True
- else:
- return False
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identity which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <Request complete actions>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- @param rid: Relation id to query for request
- """
- request = None
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- request_data = json.loads(broker_req)
- request = CephBrokerRq(api_version=request_data['api-version'],
- request_id=request_data['request-id'])
- request.set_ops(request_data['ops'])
-
- return request
-
-
-def get_request_states(request):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
-
- @param request: A CephBrokerRq object
- """
- complete = []
- requests = {}
- for rid in relation_ids('ceph'):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similair request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similair request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
- log('Using legacy broker_rsp as remote service does not '
- 'supports unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids('ceph'):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index c296f09..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
- '''
- Parse through 'losetup -a' output to determine currently mapped
- loopback devices. Output is expected to look like:
-
- /dev/loop0: [0807]:961814 (/tmp/my.img)
-
- :returns: dict: a dict mapping {loopback_dev: backing_file}
- '''
- loopbacks = {}
- cmd = ['losetup', '-a']
- devs = [d.strip().split(' ') for d in
- check_output(cmd).splitlines() if d != '']
- for dev, _, f in devs:
- loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
- return loopbacks
-
-
-def create_loopback(file_path):
- '''
- Create a loopback device for a given backing file.
-
- :returns: str: Full path to new loopback device (eg, /dev/loop0)
- '''
- file_path = os.path.abspath(file_path)
- check_call(['losetup', '--find', file_path])
- for d, f in six.iteritems(loopback_devices()):
- if f == file_path:
- return d
-
-
-def ensure_loopback_device(path, size):
- '''
- Ensure a loopback device exists for a given backing file path and size.
- If it a loopback device is not mapped to file, a new one will be created.
-
- TODO: Confirm size of found loopback device.
-
- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
- '''
- for d, f in six.iteritems(loopback_devices()):
- if f == path:
- return d
-
- if not os.path.exists(path):
- cmd = ['truncate', '--size', size, path]
- check_call(cmd)
-
- return create_loopback(path)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index 34b5f71..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
- '''
- Deactivate any volume gruop associated with an LVM physical volume.
-
- :param block_device: str: Full path to LVM physical volume
- '''
- vg = list_lvm_volume_group(block_device)
- if vg:
- cmd = ['vgchange', '-an', vg]
- check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
- '''
- Determine whether a block device is initialized as an LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: boolean: True if block device is a PV, False if not.
- '''
- try:
- check_output(['pvdisplay', block_device])
- return True
- except CalledProcessError:
- return False
-
-
-def remove_lvm_physical_volume(block_device):
- '''
- Remove LVM PV signatures from a given block device.
-
- :param block_device: str: Full path of block device to scrub.
- '''
- p = Popen(['pvremove', '-ff', block_device],
- stdin=PIPE)
- p.communicate(input='y\n')
-
-
-def list_lvm_volume_group(block_device):
- '''
- List LVM volume group associated with a given block device.
-
- Assumes block device is a valid LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: str: Name of volume group associated with block device or None
- '''
- vg = None
- pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
- return vg
-
-
-def create_lvm_physical_volume(block_device):
- '''
- Initialize a block device as an LVM physical volume.
-
- :param block_device: str: Full path of block device to initialize.
-
- '''
- check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
- '''
- Create an LVM volume group backed by a given block device.
-
- Assumes block device has already been initialized as an LVM PV.
-
- :param volume_group: str: Name of volume group to create.
- :block_device: str: Full path of PV-initialized block device.
- '''
- check_call(['vgcreate', volume_group, block_device])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index 1e57941..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- check_call,
- check_output,
- call
-)
-
-
-def is_block_device(path):
- '''
- Confirm device at path is a valid block device node.
-
- :returns: boolean: True if path is a block device, False if not.
- '''
- if not os.path.exists(path):
- return False
- return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
- '''
- Clear a block device of partition table. Relies on sgdisk, which is
- installed as pat of the 'gdisk' package in Ubuntu.
-
- :param block_device: str: Full path of block device to clean.
- '''
- # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
- call(['sgdisk', '--zap-all', '--', block_device])
- call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
- dev_end = check_output(['blockdev', '--getsz',
- block_device]).decode('UTF-8')
- gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
- '''Given a device path, return True if that device is mounted, and False
- if it isn't.
-
- :param device: str: Full path of the device to check.
- :returns: boolean: True if the path represents a mounted device, False if
- it doesn't.
- '''
- is_partition = bool(re.search(r".*[0-9]+\b", device))
- out = check_output(['mount']).decode('UTF-8')
- if is_partition:
- return bool(re.search(device + r"\b", out))
- return bool(re.search(device + r"[0-9]*\b", out))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index ab53a78..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,898 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peer'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peer``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peer'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py
deleted file mode 100644
index cb3c527..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 4aaca3f..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3f67783..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index efd7f9f..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f023b26..0000000
--- a/charms/trusty/contrail-analytics/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-analytics/hooks/config-changed b/charms/trusty/contrail-analytics/hooks/config-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined b/charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-analytics-api-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed b/charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-api-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed b/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail-discovery-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py b/charms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py
deleted file mode 100755
index c05a53f..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail_analytics_hooks.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#!/usr/bin/env python
-
-from socket import gethostbyname
-import sys
-import shutil
-
-from apt_pkg import version_compare
-import yaml
-
-from charmhelpers.contrib.openstack.utils import configure_installation_source
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- local_unit,
- log,
- relation_get,
- relation_ids,
- relation_set,
- unit_get
-)
-
-from charmhelpers.core.host import (
- restart_on_change,
- service_restart
-)
-
-from charmhelpers.fetch import (
- apt_install,
- apt_upgrade,
- configure_sources
-)
-
-from contrail_analytics_utils import (
- CONTRAIL_VERSION,
- cassandra_units,
- fix_hostname,
- fix_nodemgr,
- fix_permissions,
- fix_services,
- kafka_units,
- provision_analytics,
- units,
- unprovision_analytics,
- write_alarm_config,
- write_analytics_api_config,
- write_collector_config,
- write_nodemgr_config,
- write_query_engine_config,
- write_snmp_collector_config,
- write_topology_config,
- write_vnc_api_config,
- write_keystone_auth_config
-)
-
-PACKAGES = [ "contrail-analytics", "contrail-utils", "contrail-nodemgr",
- "python-jinja2" ]
-
-hooks = Hooks()
-config = config()
-
-def add_analytics():
- # check relation dependencies
- if not config.get("analytics-configured") \
- and config.get("cassandra-ready") \
- and config.get("kafka-ready") \
- and config.get("zookeeper-ready") \
- and config.get("contrail-api-ready") \
- and config.get("contrail-discovery-ready") \
- and config.get("identity-admin-ready"):
- # provision analytics on 3.0.2.0+
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- provision_analytics()
- config["analytics-configured"] = True
-
-@hooks.hook("cassandra-relation-changed")
-def cassandra_changed():
- if not relation_get("native_transport_port"):
- log("Relation not ready")
- return
- if not config.get("cassandra-ready"):
- units = len(cassandra_units())
- required = config["cassandra-units"]
- if units < required:
- log("{} cassandra unit(s) ready, require {} more".format(units, required - units))
- return
- config["cassandra-ready"] = True
- cassandra_relation()
- add_analytics()
-
-@hooks.hook("cassandra-relation-departed")
-@hooks.hook("cassandra-relation-broken")
-def cassandra_departed():
- if not units("cassandra"):
- remove_analytics()
- config["cassandra-ready"] = False
- cassandra_relation()
-
-@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
- "/etc/contrail/contrail-query-engine.conf": ["contrail-query-engine"],
- "/etc/contrail/contrail-analytics-api.conf": ["contrail-analytics-api"]})
-def cassandra_relation():
- write_collector_config()
- write_query_engine_config()
- write_analytics_api_config()
-
-@hooks.hook("config-changed")
-def config_changed():
- vip = config.get("vip")
- for rid in relation_ids("contrail-analytics-api"):
- relation_set(relation_id=rid, vip=vip)
-
-@hooks.hook("contrail-analytics-api-relation-joined")
-def contrail_analytics_api_joined():
- relation_set(port=8081, vip=config.get("vip"))
-
-@hooks.hook("contrail-api-relation-changed")
-def contrail_api_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- contrail_api_relation()
- config["contrail-api-ready"] = True
- add_analytics()
-
-@hooks.hook("contrail-api-relation-departed")
-@hooks.hook("contrail-api-relation-broken")
-def contrail_api_departed():
- if not units("contrail-api"):
- remove_analytics()
- config["contrail-api-ready"] = False
- contrail_api_relation()
-
-@restart_on_change({"/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"],
- "/etc/contrail/vnc_api_lib.ini": ["contrail-topology"]})
-def contrail_api_relation():
- write_snmp_collector_config()
- write_vnc_api_config()
- write_analytics_api_config()
-
-@hooks.hook("contrail-discovery-relation-changed")
-def contrail_discovery_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- contrail_discovery_relation()
- config["contrail-discovery-ready"] = True
- add_analytics()
-
-@hooks.hook("contrail-discovery-relation-departed")
-@hooks.hook("contrail-discovery-relation-broken")
-def contrail_discovery_departed():
- if not units("contrail-discovery"):
- remove_analytics()
- config["contrail-discovery-ready"] = False
- contrail_discovery_relation()
-
-@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
- "/etc/contrail/contrail-query-engine.conf": ["contrail-query-engine"],
- "/etc/contrail/contrail-analytics-api.conf": ["contrail-analytics-api"],
- "/etc/contrail/contrail-alarm-gen.conf": ["contrail-alarm-gen"],
- "/etc/contrail/contrail-topology.conf": ["contrail-topology"],
- "/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"],
- "/etc/contrail/contrail-analytics-nodemgr.conf": ["contrail-analytics-nodemgr"]})
-def contrail_discovery_relation():
- write_collector_config()
- write_query_engine_config()
- write_analytics_api_config()
- write_alarm_config()
- write_topology_config()
- write_snmp_collector_config()
- write_nodemgr_config()
-
-@hooks.hook("http-services-relation-joined")
-def http_services_joined():
- name = local_unit().replace("/", "-")
- addr = gethostbyname(unit_get("private-address"))
- services = [ { "service_name": "contrail-analytics-api",
- "service_host": "0.0.0.0",
- "service_port": 8081,
- "service_options": [ "mode http", "balance leastconn", "option httpchk GET /analytics HTTP/1.0" ],
- "servers": [ [ name, addr, 8081, "check" ] ] } ]
- relation_set(services=yaml.dump(services))
-
-@hooks.hook("identity-admin-relation-changed")
-def identity_admin_changed():
- if not relation_get("service_hostname"):
- log("Relation not ready")
- return
- identity_admin_relation()
- config["identity-admin-ready"] = True
- add_analytics()
-
-@hooks.hook("identity-admin-relation-departed")
-@hooks.hook("identity-admin-relation-broken")
-def identity_admin_departed():
- if not units("identity-admin"):
- remove_analytics()
- config["identity-admin-ready"] = False
- identity_admin_relation()
-
-@restart_on_change({"/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"],
- "/etc/contrail/vnc_api_lib.ini": ["contrail-topology"],
- "/etc/contrail/contrail-keystone-auth.conf": ["contrail-keystone-auth"]})
-def identity_admin_relation():
- write_snmp_collector_config()
- write_vnc_api_config()
- write_keystone_auth_config()
-
-@hooks.hook()
-def install():
- fix_hostname()
- shutil.copy('files/contrail', '/etc/apt/preferences.d')
- configure_installation_source(config["openstack-origin"])
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- apt_install(PACKAGES, fatal=True)
- fix_permissions()
- fix_services()
- fix_nodemgr()
-
-@hooks.hook("kafka-relation-changed")
-def kafka_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- if not config.get("kafka-ready"):
- units = len(kafka_units())
- required = config["kafka-units"]
- if units < required:
- log("{} kafka unit(s) ready, require {} more".format(units, required - units))
- return
- config["kafka-ready"] = True
- kafka_relation()
- add_analytics()
-
-@hooks.hook("kafka-relation-departed")
-@hooks.hook("kafka-relation-broken")
-def kafka_departed():
- if not units("kafka"):
- remove_analytics()
- config["kafka-ready"] = False
- kafka_relation()
-
-@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
- "/etc/contrail/contrail-alarm-gen.conf": ["contrail-alarm-gen"]})
-def kafka_relation():
- write_collector_config()
- write_alarm_config()
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
-
-def remove_analytics():
- if config.get("analytics-configured"):
- # unprovision analytics on 3.0.2.0+
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- unprovision_analytics()
- config["analytics-configured"] = False
-
-@hooks.hook("upgrade-charm")
-def upgrade_charm():
- write_collector_config()
- write_query_engine_config()
- write_analytics_api_config()
- write_alarm_config()
- write_topology_config()
- write_snmp_collector_config()
- write_vnc_api_config()
- write_nodemgr_config()
- service_restart("supervisor-analytics")
-
-@hooks.hook("zookeeper-relation-changed")
-def zookeeper_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- zookeeper_relation()
- config["zookeeper-ready"] = True
- add_analytics()
-
-@hooks.hook("zookeeper-relation-departed")
-@hooks.hook("zookeeper-relation-broken")
-def zookeeper_departed():
- if not units("zookeeper"):
- remove_analytics()
- config["zookeeper-ready"] = False
- zookeeper_relation()
-
-@restart_on_change({"/etc/contrail/contrail-collector.conf": ["contrail-collector"],
- "/etc/contrail/contrail-alarm-gen.conf": ["contrail-alarm-gen"],
- "/etc/contrail/contrail-topology.conf": ["contrail-topology"],
- "/etc/contrail/contrail-snmp-collector.conf": ["contrail-snmp-collector"]})
-def zookeeper_relation():
- write_collector_config()
- write_alarm_config()
- write_topology_config()
- write_snmp_collector_config()
-
-if __name__ == "__main__":
- main()
diff --git a/charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py b/charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py
deleted file mode 100644
index 86a6765..0000000
--- a/charms/trusty/contrail-analytics/hooks/contrail_analytics_utils.py
+++ /dev/null
@@ -1,318 +0,0 @@
-import os
-import pwd
-import shutil
-from socket import gaierror, gethostbyname, gethostname
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output
-)
-
-import apt_pkg
-from apt_pkg import version_compare
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- related_units,
- relation_get,
- relation_ids,
- relation_type,
- remote_unit,
- unit_get
-)
-
-from charmhelpers.core.host import service_restart
-
-from charmhelpers.core.templating import render
-
-apt_pkg.init()
-
-def dpkg_version(pkg):
- try:
- return check_output(["dpkg-query", "-f", "${Version}\\n", "-W", pkg]).rstrip()
- except CalledProcessError:
- return None
-
-CONTRAIL_VERSION = dpkg_version("contrail-analytics")
-
-config = config()
-
-def contrail_ctx():
- return { "host_ip": gethostbyname(unit_get("private-address")) }
-
-def cassandra_ctx():
- key = "native_transport_port" \
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0 \
- else "rpc_port"
- servers = [ gethostbyname(relation_get("private-address", unit, rid))
- + ":" + port
- for rid in relation_ids("cassandra")
- for unit, port in
- ((unit, relation_get(key, unit, rid)) for unit in related_units(rid))
- if port ] \
- if config.get("cassandra-ready") else []
- return { "cassandra_servers": servers }
-
-def cassandra_units():
- """Return a list of cassandra units"""
- return [ unit for rid in relation_ids("cassandra")
- for unit in related_units(rid)
- if relation_get("native_transport_port", unit, rid) ]
-
-def contrail_api_ctx():
- ctxs = [ { "api_server": vip if vip \
- else gethostbyname(relation_get("private-address", unit, rid)),
- "api_port": port }
- for rid in relation_ids("contrail-api")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def discovery_ctx():
- ctxs = [ { "disc_server_ip": vip if vip \
- else gethostbyname(relation_get("private-address", unit, rid)),
- "disc_server_port": port }
- for rid in relation_ids("contrail-discovery")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def fix_hostname():
- # ensure hostname is resolvable
- hostname = gethostname()
- try:
- gethostbyname(hostname)
- except gaierror:
- check_call(["sed", "-E", "-i", "-e",
- "/127.0.0.1[[:blank:]]+/a \\\n127.0.1.1 " + hostname,
- "/etc/hosts"])
-
-def fix_nodemgr():
- # add files missing from contrail-nodemgr package
- shutil.copy("files/contrail-nodemgr-analytics.ini",
- "/etc/contrail/supervisord_analytics_files")
- shutil.copy("files/contrail-analytics-api.ini",
- "/etc/contrail/supervisord_analytics_files")
- shutil.copy("files/contrail-collector.ini",
- "/etc/contrail/supervisord_analytics_files")
- shutil.copy("files/contrail-alarm-gen.ini",
- "/etc/contrail/supervisord_analytics_files")
- shutil.copy("files/contrail-topology.ini",
- "/etc/contrail/supervisord_analytics_files")
- shutil.copy("files/contrail-snmp-collector.ini",
- "/etc/contrail/supervisord_analytics_files")
- pw = pwd.getpwnam("contrail")
- os.chown("/etc/contrail/supervisord_analytics_files/contrail-nodemgr-analytics.ini",
- pw.pw_uid, pw.pw_gid)
- shutil.copy("files/contrail-analytics-nodemgr", "/etc/init.d")
- os.chmod("/etc/init.d/contrail-analytics-nodemgr", 0755)
-
- # fake ntp status when inside a container
- if is_container():
- shutil.copy("files/ntpq-nodemgr", "/usr/local/bin/ntpq")
-
- service_restart("supervisor-analytics")
-
-def fix_permissions():
- os.chmod("/etc/contrail", 0755)
- os.chown("/etc/contrail", 0, 0)
-
-def fix_services():
- # redis listens on localhost by default
- check_output(["sed", "-i", "-e",
- "s/^bind /# bind /",
- "/etc/redis/redis.conf"])
- service_restart("redis-server")
-
-def identity_admin_ctx():
- ctxs = [ { "auth_host": gethostbyname(hostname),
- "auth_port": relation_get("service_port", unit, rid),
- "admin_user": relation_get("service_username", unit, rid),
- "admin_password": relation_get("service_password", unit, rid),
- "admin_tenant_name": relation_get("service_tenant_name", unit, rid) }
- for rid in relation_ids("identity-admin")
- for unit, hostname in
- ((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
- if hostname ]
- return ctxs[0] if ctxs else {}
-
-def is_container():
- """Return boolean determining if inside container"""
- try:
- check_call(["running-in-container"])
- return True
- except CalledProcessError:
- return False
-
-def kafka_ctx():
- servers = [ gethostbyname(relation_get("private-address", unit, rid))
- + ":" + port
- for rid in relation_ids("kafka")
- for unit, port in
- ((unit, relation_get("port", unit, rid))
- for unit in related_units(rid))
- if port ] \
- if config.get("kafka-ready") else []
- return { "kafka_servers": servers }
-
-def kafka_units():
- """Return a list of kafka units"""
- return [ unit for rid in relation_ids("kafka")
- for unit in related_units(rid)
- if relation_get("port", unit, rid) ]
-
-def provision_analytics():
- hostname = gethostname()
- ip = gethostbyname(unit_get("private-address"))
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- port)
- for rid in relation_ids("contrail-api")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ][0]
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid)
- if relation_get("service_hostname", unit, rid) ][0]
- log("Provisioning analytics {}".format(ip))
- check_call(["contrail-provision-analytics",
- "--host_name", hostname,
- "--host_ip", ip,
- "--api_server_ip", api_ip,
- "--api_server_port", str(api_port),
- "--oper", "add",
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant_name", tenant])
-
-def units(relation):
- """Return a list of units for the specified relation"""
- return [ unit for rid in relation_ids(relation)
- for unit in related_units(rid) ]
-
-def unprovision_analytics():
- if not remote_unit():
- return
- hostname = gethostname()
- ip = gethostbyname(unit_get("private-address"))
- relation = relation_type()
- api_ip = None
- api_port = None
- if relation == "contrail-api":
- api_ip = gethostbyname(relation_get("private-address"))
- api_port = relation_get("port")
- else:
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- relation_get("port", unit, rid))
- for rid in relation_ids("contrail-api")
- for unit in related_units(rid) ][0]
- user = None
- password = None
- tenant = None
- if relation == "identity-admin":
- user = relation_get("service_username")
- password = relation_get("service_password")
- tenant = relation_get("service_tenant_name")
- else:
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Unprovisioning analytics {}".format(ip))
- check_call(["contrail-provision-analytics",
- "--host_name", hostname,
- "--host_ip", ip,
- "--api_server_ip", api_ip,
- "--api_server_port", str(api_port),
- "--oper", "del",
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant_name", tenant])
-
-def write_alarm_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(kafka_ctx())
- ctx.update(zookeeper_ctx())
- ctx.update(discovery_ctx())
- render("contrail-alarm-gen.conf",
- "/etc/contrail/contrail-alarm-gen.conf", ctx)
-
-def write_analytics_api_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(cassandra_ctx())
- ctx.update(discovery_ctx())
- ctx.update(contrail_api_ctx())
- render("contrail-analytics-api.conf",
- "/etc/contrail/contrail-analytics-api.conf", ctx)
-
-def write_collector_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(cassandra_ctx())
- ctx.update(kafka_ctx())
- ctx.update(discovery_ctx())
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- ctx["zookeeper"] = True
- ctx.update(zookeeper_ctx())
- render("contrail-collector.conf",
- "/etc/contrail/contrail-collector.conf", ctx)
-
-def write_nodemgr_config():
- ctx = discovery_ctx()
- render("contrail-analytics-nodemgr.conf",
- "/etc/contrail/contrail-analytics-nodemgr.conf", ctx)
-
-def write_query_engine_config():
- ctx = {}
- ctx.update(cassandra_ctx())
- ctx.update(discovery_ctx())
- render("contrail-query-engine.conf",
- "/etc/contrail/contrail-query-engine.conf", ctx)
-
-def write_snmp_collector_config():
- ctx = {}
- ctx.update(contrail_api_ctx())
- ctx.update(zookeeper_ctx())
- ctx.update(discovery_ctx())
- ctx.update(identity_admin_ctx())
- render("contrail-snmp-collector.conf",
- "/etc/contrail/contrail-snmp-collector.conf", ctx, "root",
- "contrail", 0440)
-
-def write_topology_config():
- ctx = {}
- ctx.update(zookeeper_ctx())
- ctx.update(discovery_ctx())
- render("contrail-topology.conf",
- "/etc/contrail/contrail-topology.conf", ctx)
-
-def write_vnc_api_config():
- ctx = {}
- ctx.update(contrail_api_ctx())
- ctx.update(identity_admin_ctx())
- render("vnc_api_lib.ini", "/etc/contrail/vnc_api_lib.ini", ctx)
-
-def write_keystone_auth_config():
- ctx = {}
- ctx.update(contrail_api_ctx())
- ctx.update(identity_admin_ctx())
- render("contrail-keystone-auth.conf",
- "/etc/contrail/contrail-keystone-auth.conf", ctx)
-
-def zookeeper_ctx():
- return { "zk_servers": [ gethostbyname(relation_get("private-address", unit, rid))
- + ":" + port
- for rid in relation_ids("zookeeper")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ] }
diff --git a/charms/trusty/contrail-analytics/hooks/http-services-relation-joined b/charms/trusty/contrail-analytics/hooks/http-services-relation-joined
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/http-services-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed b/charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/identity-admin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/install b/charms/trusty/contrail-analytics/hooks/install
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/kafka-relation-broken b/charms/trusty/contrail-analytics/hooks/kafka-relation-broken
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/kafka-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/kafka-relation-changed b/charms/trusty/contrail-analytics/hooks/kafka-relation-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/kafka-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/kafka-relation-departed b/charms/trusty/contrail-analytics/hooks/kafka-relation-departed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/kafka-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/start b/charms/trusty/contrail-analytics/hooks/start
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/stop b/charms/trusty/contrail-analytics/hooks/stop
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/upgrade-charm b/charms/trusty/contrail-analytics/hooks/upgrade-charm
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed b/charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed
deleted file mode 120000
index f633cfc..0000000
--- a/charms/trusty/contrail-analytics/hooks/zookeeper-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_analytics_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-analytics/icon.svg b/charms/trusty/contrail-analytics/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/contrail-analytics/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/contrail-analytics/metadata.yaml b/charms/trusty/contrail-analytics/metadata.yaml
deleted file mode 100644
index 8c33069..0000000
--- a/charms/trusty/contrail-analytics/metadata.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-name: contrail-analytics
-summary: OpenContrail Analytics Node
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- OpenContrail is a network virtualization solution that provides an overlay
- virtual-network to virtual-machines, containers or network namespaces.
- .
- This charm provides the analytics node component.
-categories:
- - openstack
-provides:
- contrail-analytics-api:
- interface: contrail-analytics-api
- http-services:
- interface: http
-requires:
- cassandra:
- interface: cassandra
- contrail-api:
- interface: contrail-api
- contrail-discovery:
- interface: contrail-discovery
- identity-admin:
- interface: keystone-admin
- kafka:
- interface: kafka
- zookeeper:
- interface: zookeeper
diff --git a/charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf b/charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf
deleted file mode 100644
index 034c7d9..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-alarm-gen.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-host_ip = {{ host_ip }}
-collectors = 127.0.0.1:8086
-http_server_port = 5995
-log_local = 1
-log_level = SYS_NOTICE
-log_file = /var/log/contrail/contrail-alarm-gen.log
-kafka_broker_list = {{ kafka_servers|join(" ") }}
-zk_list = {{ zk_servers|join(",") }}
-
-[DISCOVERY]
-disc_server_ip = {{ disc_server_ip }}
-disc_server_port = {{ disc_server_port }}
-
-[REDIS]
-redis_server_port = 6379
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf b/charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf
deleted file mode 100644
index 3f72020..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-analytics-api.conf
+++ /dev/null
@@ -1,26 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-host_ip = {{ host_ip }}
-collectors = 127.0.0.1:8086
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-http_server_port = 8090
-rest_api_port = 8081
-rest_api_ip = 0.0.0.0
-log_local = 1
-log_level = SYS_NOTICE
-log_file = /var/log/contrail/contrail-analytics-api.log
-api_server = {{ api_server }}:{{ api_port }}
-
-[DISCOVERY]
-disc_server_ip = {{ disc_server_ip }}
-disc_server_port = {{ disc_server_port }}
-
-[REDIS]
-server = 127.0.0.1
-redis_server_port = 6379
-redis_query_port = 6379
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf b/charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf
deleted file mode 100644
index 5bf7c03..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-analytics-nodemgr.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DISCOVERY]
-server = {{ disc_server_ip }}
-port = {{ disc_server_port }}
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-collector.conf b/charms/trusty/contrail-analytics/templates/contrail-collector.conf
deleted file mode 100644
index 5b2eeda..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-collector.conf
+++ /dev/null
@@ -1,31 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULT]
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-{%- if zookeeper %}
-zookeeper_server_list = {{ zk_servers|join(",") }}
-{%- endif %}
-kafka_broker_list = {{ kafka_servers|join(" ") }}
-hostip = {{ host_ip }}
-http_server_port = 8089
-log_file = /var/log/contrail/contrail-collector.log
-log_level = SYS_NOTICE
-log_local = 1
-syslog_port = 514
-sflow_port = 6343
-
-[COLLECTOR]
-port = 8086
-server = 0.0.0.0
-
-[DISCOVERY]
-port = {{ disc_server_port }}
-server = {{ disc_server_ip }}
-
-[REDIS]
-port = 6379
-server = 127.0.0.1
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf b/charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf
deleted file mode 100644
index 7e10256..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-keystone-auth.conf
+++ /dev/null
@@ -1,11 +0,0 @@
-[KEYSTONE]
-auth_url=http://{{ auth_host }}:{{ auth_port }}/v2.0
-auth_host = {{ auth_host }}
-auth_protocol = http
-auth_port = {{ auth_port }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-memcache_servers=127.0.0.1:11211
-insecure=False
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-query-engine.conf b/charms/trusty/contrail-analytics/templates/contrail-query-engine.conf
deleted file mode 100644
index 6cba023..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-query-engine.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULT]
-analytics_data_ttl = 48
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-collectors =
-http_server_port = 8091
-log_file = /var/log/contrail/contrail-query-engine.log
-log_level = SYS_NOTICE
-log_local = 1
-max_slice = 100
-max_tasks = 16
-
-[DISCOVERY]
-port = {{ disc_server_port }}
-server = {{ disc_server_ip }}
-
-[REDIS]
-port = 6379
-server = 127.0.0.1
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf b/charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf
deleted file mode 100644
index e455b9a..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-snmp-collector.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-api_server = {{ api_server ~ ":" ~ api_port if api_server and api_port }}
-collectors = 127.0.0.1:8086
-fast_scan_frequency = 60
-http_server_port = 5920
-log_local = 1
-log_level = SYS_NOTICE
-log_file = /var/log/contrail/contrail-snmp-collector.log
-scan_frequency = 600
-zookeeper = {{ zk_servers|join(",") }}
-
-[DISCOVERY]
-disc_server_ip = {{ disc_server_ip }}
-disc_server_port = {{ disc_server_port }}
-
-[KEYSTONE]
-auth_host = {{ auth_host }}
-auth_protocol = http
-auth_port = {{ auth_port }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-
diff --git a/charms/trusty/contrail-analytics/templates/contrail-topology.conf b/charms/trusty/contrail-analytics/templates/contrail-topology.conf
deleted file mode 100644
index 18177fa..0000000
--- a/charms/trusty/contrail-analytics/templates/contrail-topology.conf
+++ /dev/null
@@ -1,19 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-analytics_api = 127.0.0.1:8081
-collectors = 127.0.0.1:8086
-http_server_port = 5921
-log_local = 1
-log_level = SYS_NOTICE
-log_file = /var/log/contrail/contrail-topology.log
-scan_frequency = 60
-zookeeper = {{ zk_servers|join(",") }}
-
-[DISCOVERY]
-disc_server_ip = {{ disc_server_ip }}
-disc_server_port = {{ disc_server_port }}
-
diff --git a/charms/trusty/contrail-analytics/templates/vnc_api_lib.ini b/charms/trusty/contrail-analytics/templates/vnc_api_lib.ini
deleted file mode 100644
index fd68487..0000000
--- a/charms/trusty/contrail-analytics/templates/vnc_api_lib.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[global]
-WEB_SERVER = {{ api_server }}
-WEB_PORT = {{ api_port }}
-
-[auth]
-AUTHN_TYPE = keystone
-AUTHN_PROTOCOL = http
-AUTHN_SERVER = {{ auth_host }}
-AUTHN_PORT = {{ auth_port }}
-AUTHN_URL = /v2.0/tokens
-
diff --git a/charms/trusty/contrail-configuration/.bzrignore b/charms/trusty/contrail-configuration/.bzrignore
deleted file mode 100644
index ba077a4..0000000
--- a/charms/trusty/contrail-configuration/.bzrignore
+++ /dev/null
@@ -1 +0,0 @@
-bin
diff --git a/charms/trusty/contrail-configuration/Makefile b/charms/trusty/contrail-configuration/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/contrail-configuration/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/contrail-configuration/README.md b/charms/trusty/contrail-configuration/README.md
deleted file mode 100644
index ae21000..0000000
--- a/charms/trusty/contrail-configuration/README.md
+++ /dev/null
@@ -1,111 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This charm provides the configuration node component which includes
-contrail-api, contrail-schema, contrail-discovery and ifmap-server services.
-Only OpenStack Icehouse or newer is supported.
-Juju 1.23.2+ required.
-
-Usage
------
-
-Cassandra, Zookeeper, RabbitMQ and Keystone are prerequisite services to deploy.
-Once ready, deploy and relate as follows:
-
- juju deploy contrail-configuration
- juju add-relation contrail-configuration:cassandra cassandra:database
- juju add-relation contrail-configuration zookeeper
- juju add-relation contrail-configuration rabbitmq-server
- juju add-relation contrail-configuration keystone
-
-After deployment, relate to neutron-api-contrail:
-
- juju add-relation neutron-api-contrail contrail-configuration
-
-Install Sources
----------------
-
-The version of OpenContrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
-
-The version of dependent OpenStack components installed when deploying can be
-changed using the 'openstack-origin' option. When deploying to different
-OpenStack versions, openstack-origin needs to be set across all OpenStack and
-OpenContrail charms where available.
-
-Floating IP Pools
------------------
-
-To use OpenStack floating IP functionality, floating IP pools must be created
-and activated. Creation of multiple pools for multiple projects is supported
-using the 'floating-ip-pools' option.
-
-A value is specified as a YAML encoded string, indicating one or more pools
-using a list of maps, where each map consists of the following attributes:
-
- project - project name
- network - network name
- pool-name - floating pool name
- target-projects - list of projects allowed to use pool
-
-For example to create a floating ip pool named 'floatingip_pool' on
-'admin:public' network and allow 'admin' project to use:
-
- juju set contrail-configuration \
- "floating-ip-pools=[ { project: admin, network: public, pool-name: floatingip_pool, target-projects: [ admin ] } ]"
-
-Previously specified pools will be deactivated and removed.
-
-Nova Metadata
--------------
-
-To use Nova Metadata with Nova Compute instances, a metadata service must first
-be registered. Registration allows OpenContrail to create the appropriate
-network config to proxy requests from instances to a nova-api service on the
-network.
-
-Relating to a charm implementing neutron-metadata interface will register a
-linklocal metadata service:
-
- juju add-relation contrail-configuration neutron-metadata-charm
-
-neutron-contrail charm also needs to be related to the same charm to use correct
-configuration:
-
- juju add-relation neutron-contrail neutron-metadata-charm
-
-*NOTE: neutron-contrail runs and registers its own nova-api-metadata service
-on each Compute node by default ('local-metadata-server' option), so using
-neutron-metadata relation isn't necessary unless you need more control over
-deployment.*
-
-High Availability (HA)
-----------------------
-
-Multiple units of this charm can be deployed to support HA deployments:
-
- juju add-unit contrail-configuration
-
-Relating to haproxy charm (http-services relation) allows multiple units to be
-load balanced:
-
- juju add-relation contrail-configuration haproxy
-
-Setting the 'vip' option instructs related charms to use IP address specified
-for accessing the configuration node:
-
- juju set contrail-configuration vip=x.x.x.x
-
-When load balancing with HAProxy you would set vip to the IP address of the
-deployed haproxy charm (or a shared Virtual IP address if doing clustered
-HAProxy).
diff --git a/charms/trusty/contrail-configuration/charm-helpers-sync.yaml b/charms/trusty/contrail-configuration/charm-helpers-sync.yaml
deleted file mode 100644
index eadff82..0000000
--- a/charms/trusty/contrail-configuration/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
- - contrib.network
- - contrib.openstack|inc=*
- - contrib.python
- - contrib.storage
diff --git a/charms/trusty/contrail-configuration/config.yaml b/charms/trusty/contrail-configuration/config.yaml
deleted file mode 100644
index 3e49e28..0000000
--- a/charms/trusty/contrail-configuration/config.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-options:
- install-sources:
- type: string
- default: |
- - "ppa:opencontrail/ppa"
- - "ppa:opencontrail/r2.20"
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
- openstack-origin:
- type: string
- default: distro
- description: |
- Repository from which to install. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported Cloud Archive release pocket.
-
- Supported Cloud Archive sources include: cloud:precise-folsom,
- cloud:precise-folsom/updates, cloud:precise-folsom/staging,
- cloud:precise-folsom/proposed.
- floating-ip-pools:
- type: string
- description: |
- Floating IP pools to create.
-
- Using a YAML encoded string specify one or more floating IP pools using a
- list of maps, where each map consists of the following attributes:
-
- project - project name
- network - network name
- pool-name - floating pool name
- target-projects - list of projects allowed to use pool
-
- For example:
-
- // create a floating ip pool named floatingip_pool on admin:public network
- // and allow admin project to use
- [ { project: admin, network: public, pool-name: floatingip_pool, target-projects: [ admin ] } ]
- vip:
- type: string
- description: |
- Virtual IP address to use when services are related in a High Availability
- configuration.
- cassandra-units:
- type: int
- default: 1
- description: Minimum number of units required in cassandra relation
diff --git a/charms/trusty/contrail-configuration/copyright b/charms/trusty/contrail-configuration/copyright
deleted file mode 100644
index 4081144..0000000
--- a/charms/trusty/contrail-configuration/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2014, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/files/contrail-config-nodemgr b/charms/trusty/contrail-configuration/files/contrail-config-nodemgr
deleted file mode 100644
index ce0c164..0000000
--- a/charms/trusty/contrail-configuration/files/contrail-config-nodemgr
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network Configuration Node Manager
-
-supervisorctl -s unix:///tmp/supervisord_config.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-configuration/files/contrail-nodemgr-config.ini b/charms/trusty/contrail-configuration/files/contrail-nodemgr-config.ini
deleted file mode 100644
index ef07ff0..0000000
--- a/charms/trusty/contrail-configuration/files/contrail-nodemgr-config.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[eventlistener:contrail-config-nodemgr]
-command=/bin/bash -c "exec python /usr/bin/contrail-nodemgr --nodetype=contrail-config"
-events=PROCESS_COMMUNICATION,PROCESS_STATE,TICK_60
-buffer_size=10000
-stdout_logfile=/var/log/contrail/contrail-config-nodemgr-stdout.log
-stderr_logfile=/var/log/contrail/contrail-config-nodemgr-stderr.log
diff --git a/charms/trusty/contrail-configuration/files/ifmap b/charms/trusty/contrail-configuration/files/ifmap
deleted file mode 100644
index d5202b0..0000000
--- a/charms/trusty/contrail-configuration/files/ifmap
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network Virtualization API
-
-supervisorctl -s unix:///tmp/supervisord_config.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-configuration/files/ifmap.ini b/charms/trusty/contrail-configuration/files/ifmap.ini
deleted file mode 100644
index 360af2a..0000000
--- a/charms/trusty/contrail-configuration/files/ifmap.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-[program:ifmap]
-command=/usr/bin/ifmap-server
-priority=420
-autostart=true
-autorestart=true
-killasgroup=true
-stopasgroup=true
-stopsignal=TERM
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/ifmap-stdout.log
-stderr_logfile=/dev/null
-user=contrail
diff --git a/charms/trusty/contrail-configuration/files/ntpq-nodemgr b/charms/trusty/contrail-configuration/files/ntpq-nodemgr
deleted file mode 100755
index da00247..0000000
--- a/charms/trusty/contrail-configuration/files/ntpq-nodemgr
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-#
-# Script to produce some dummy output to satisfy contrail-nodemgr ntp status
-# Note: This is intended to be deployed inside containers where the host is running ntp
-
-if [ -x /usr/bin/ntpq ]; then
- exec /usr/bin/ntpq "$@"
-fi
-
-echo "*"
diff --git a/charms/trusty/contrail-configuration/hooks/amqp-relation-broken b/charms/trusty/contrail-configuration/hooks/amqp-relation-broken
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/amqp-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/amqp-relation-changed b/charms/trusty/contrail-configuration/hooks/amqp-relation-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/amqp-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/amqp-relation-departed b/charms/trusty/contrail-configuration/hooks/amqp-relation-departed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/amqp-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/amqp-relation-joined b/charms/trusty/contrail-configuration/hooks/amqp-relation-joined
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/amqp-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/cassandra-relation-broken b/charms/trusty/contrail-configuration/hooks/cassandra-relation-broken
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/cassandra-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/cassandra-relation-changed b/charms/trusty/contrail-configuration/hooks/cassandra-relation-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/cassandra-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/cassandra-relation-departed b/charms/trusty/contrail-configuration/hooks/cassandra-relation-departed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/cassandra-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ip.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index 7f3b66b..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network: %s" % network)
- raise ValueError(errmsg)
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
-
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any."""
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4 address
- socket.inet_aton(address)
- return True
- except socket.error:
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- apt_install('python-dnspython')
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- answers = dns.resolver.query(address, rtype)
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- apt_install("python-dnspython")
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ovs/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ovs/__init__.py
deleted file mode 100644
index 77e2db7..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helpers for interacting with OpenvSwitch '''
-import subprocess
-import os
-from charmhelpers.core.hookenv import (
- log, WARNING
-)
-from charmhelpers.core.host import (
- service
-)
-
-
-def add_bridge(name):
- ''' Add the named bridge to openvswitch '''
- log('Creating bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
-
-
-def del_bridge(name):
- ''' Delete the named bridge from openvswitch '''
- log('Deleting bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
-
-
-def add_bridge_port(name, port, promisc=False):
- ''' Add a port to the named openvswitch bridge '''
- log('Adding port {} to bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "up"])
- if promisc:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
- else:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def del_bridge_port(name, port):
- ''' Delete a port from the named openvswitch bridge '''
- log('Deleting port {} from bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "down"])
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def set_manager(manager):
- ''' Set the controller for the local openvswitch '''
- log('Setting manager for local ovs to {}'.format(manager))
- subprocess.check_call(['ovs-vsctl', 'set-manager',
- 'ssl:{}'.format(manager)])
-
-
-CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
-
-
-def get_certificate():
- ''' Read openvswitch certificate from disk '''
- if os.path.exists(CERT_PATH):
- log('Reading ovs certificate from {}'.format(CERT_PATH))
- with open(CERT_PATH, 'r') as cert:
- full_cert = cert.read()
- begin_marker = "-----BEGIN CERTIFICATE-----"
- end_marker = "-----END CERTIFICATE-----"
- begin_index = full_cert.find(begin_marker)
- end_index = full_cert.rfind(end_marker)
- if end_index == -1 or begin_index == -1:
- raise RuntimeError("Certificate does not contain valid begin"
- " and end markers.")
- full_cert = full_cert[begin_index:(end_index + len(end_marker))]
- return full_cert
- else:
- log('Certificate not found', level=WARNING)
- return None
-
-
-def full_restart():
- ''' Full restart and reload of openvswitch '''
- if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
- service('start', 'openvswitch-force-reload-kmod')
- else:
- service('force-reload-kmod', 'openvswitch-switch')
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ufw.py
deleted file mode 100644
index b65d963..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/network/ufw.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module contains helpers to add and remove ufw rules.
-
-Examples:
-
-- open SSH port for subnet 10.0.3.0/24:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
-
-- open service by name as defined in /etc/services:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('ssh', 'open')
-
-- close service by port number:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('4949', 'close') # munin
-"""
-import re
-import os
-import subprocess
-
-from charmhelpers.core import hookenv
-from charmhelpers.core.kernel import modprobe, is_module_loaded
-
-__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
-
-
-class UFWError(Exception):
- pass
-
-
-class UFWIPv6Error(UFWError):
- pass
-
-
-def is_enabled():
- """
- Check if `ufw` is enabled
-
- :returns: True if ufw is enabled
- """
- output = subprocess.check_output(['ufw', 'status'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Status: active\n', output, re.M)
-
- return len(m) >= 1
-
-
-def is_ipv6_ok(soft_fail=False):
- """
- Check if IPv6 support is present and ip6tables functional
-
- :param soft_fail: If set to True and IPv6 support is broken, then reports
- that the host doesn't have IPv6 support, otherwise a
- UFWIPv6Error exception is raised.
- :returns: True if IPv6 is working, False otherwise
- """
-
- # do we have IPv6 in the machine?
- if os.path.isdir('/proc/sys/net/ipv6'):
- # is ip6tables kernel module loaded?
- if not is_module_loaded('ip6_tables'):
- # ip6tables support isn't complete, let's try to load it
- try:
- modprobe('ip6_tables')
- # great, we can load the module
- return True
- except subprocess.CalledProcessError as ex:
- hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
- level="WARN")
- # we are in a world where ip6tables isn't working
- if soft_fail:
- # so we inform that the machine doesn't have IPv6
- return False
- else:
- raise UFWIPv6Error("IPv6 firewall support broken")
- else:
- # the module is present :)
- return True
-
- else:
- # the system doesn't have IPv6
- return False
-
-
-def disable_ipv6():
- """
- Disable ufw IPv6 support in /etc/default/ufw
- """
- exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
- '/etc/default/ufw'])
- if exit_code == 0:
- hookenv.log('IPv6 support in ufw disabled', level='INFO')
- else:
- hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
- raise UFWError("Couldn't disable IPv6 support in ufw")
-
-
-def enable(soft_fail=False):
- """
- Enable ufw
-
- :param soft_fail: If set to True silently disables IPv6 support in ufw,
- otherwise a UFWIPv6Error exception is raised when IP6
- support is broken.
- :returns: True if ufw is successfully enabled
- """
- if is_enabled():
- return True
-
- if not is_ipv6_ok(soft_fail):
- disable_ipv6()
-
- output = subprocess.check_output(['ufw', 'enable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall('^Firewall is active and enabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be enabled", level='WARN')
- return False
- else:
- hookenv.log("ufw enabled", level='INFO')
- return True
-
-
-def disable():
- """
- Disable ufw
-
- :returns: True if ufw is successfully disabled
- """
- if not is_enabled():
- return True
-
- output = subprocess.check_output(['ufw', 'disable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Firewall stopped and disabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be disabled", level='WARN')
- return False
- else:
- hookenv.log("ufw disabled", level='INFO')
- return True
-
-
-def default_policy(policy='deny', direction='incoming'):
- """
- Changes the default policy for traffic `direction`
-
- :param policy: allow, deny or reject
- :param direction: traffic direction, possible values: incoming, outgoing,
- routed
- """
- if policy not in ['allow', 'deny', 'reject']:
- raise UFWError(('Unknown policy %s, valid values: '
- 'allow, deny, reject') % policy)
-
- if direction not in ['incoming', 'outgoing', 'routed']:
- raise UFWError(('Unknown direction %s, valid values: '
- 'incoming, outgoing, routed') % direction)
-
- output = subprocess.check_output(['ufw', 'default', policy, direction],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
- hookenv.log(output, level='DEBUG')
-
- m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
- policy),
- output, re.M)
- if len(m) == 0:
- hookenv.log("ufw couldn't change the default policy to %s for %s"
- % (policy, direction), level='WARN')
- return False
- else:
- hookenv.log("ufw default policy for %s changed to %s"
- % (direction, policy), level='INFO')
- return True
-
-
-def modify_access(src, dst='any', port=None, proto=None, action='allow',
- index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param action: `allow` or `delete`
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- if not is_enabled():
- hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
- return
-
- if action == 'delete':
- cmd = ['ufw', 'delete', 'allow']
- elif index is not None:
- cmd = ['ufw', 'insert', str(index), action]
- else:
- cmd = ['ufw', action]
-
- if src is not None:
- cmd += ['from', src]
-
- if dst is not None:
- cmd += ['to', dst]
-
- if port is not None:
- cmd += ['port', str(port)]
-
- if proto is not None:
- cmd += ['proto', proto]
-
- hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
-
- hookenv.log(stdout, level='INFO')
-
- if p.returncode != 0:
- hookenv.log(stderr, level='ERROR')
- hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
- p.returncode),
- level='ERROR')
-
-
-def grant_access(src, dst='any', port=None, proto=None, index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
- index=index)
-
-
-def revoke_access(src, dst='any', port=None, proto=None):
- """
- Revoke access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
-
-
-def service(name, action):
- """
- Open/close access to a service
-
- :param name: could be a service name defined in `/etc/services` or a port
- number.
- :param action: `open` or `close`
- """
- if action == 'open':
- subprocess.check_output(['ufw', 'allow', str(name)],
- universal_newlines=True)
- elif action == 'close':
- subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
- universal_newlines=True)
- else:
- raise UFWError(("'{}' not supported, use 'allow' "
- "or 'delete'").format(action))
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index ef77caf..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 722bc64..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None, stable=True):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty) = range(12)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index b139741..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,963 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-import novaclient.v1_1.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'tenantId': act.tenantId,
- 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- service_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
-# rabbitmq/amqp specific helpers:
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not port and not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.configure('rabbitmq-server', config)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.configure('rabbitmq-server', config)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/context.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index 1248d49..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1416 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call
-
-import six
-import yaml
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import get_host_ip
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-class OSContextError(Exception):
- pass
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1',
- 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1', 'value1',
- 'key2', 'value2,value3,value4'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1', 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a list of comma-separated values
- which themselves may be comma-separated list of values.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return yaml.safe_load(config_flags)
-
- if config_flags.find('==') >= 0:
- log("config_flags is not in expected format (key=value)", level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = {}
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- log("Invalid config value(s) at index %s" % (i), level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- unit_priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- ceph_addr = ceph_pub_addr or unit_priv_addr
- ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
- mon_hosts.append(ceph_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False):
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = {}
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in ADDRESS_TYPES:
- cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
- # match in the frontend
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('prefer-ipv6'):
- ctxt['ipv6'] = True
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- ctxt['stat_port'] = ':::8888'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
- ctxt['stat_port'] = ':8888'
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of a tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert))
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key))
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- cns.append(k.lstrip('ssl_key_'))
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and vip
- (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- if config('vip'):
- vips = config('vip').split()
- else:
- vips = []
-
- for net_type in ['os-internal-network', 'os-admin-network',
- 'os-public-network']:
- addr = get_address_in_network(config(net_type),
- unit_get('private-address'))
- if len(vips) > 1 and is_clustered():
- if not config(net_type):
- log("Multiple networks configured but net_type "
- "is None (%s)." % net_type, level=WARNING)
- continue
-
- for vip in vips:
- if is_address_in_network(config(net_type), vip):
- addresses.append((addr, vip))
- break
-
- elif is_clustered() and config('vip'):
- addresses.append((addr, config('vip')))
- else:
- addresses.append((addr, addr))
-
- return sorted(addresses)
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- self.configure_ca()
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in sorted(set(addresses)):
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- self._save_flag_file()
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT hace an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-seperated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some Openstack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principle for multiple config
- files and multiple serivces. Ie, a subordinate that has interfaces
- to both glance and nova may export to following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principle charms to subscribe this context to
- the service+config file it is interestd in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_config': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except:
- log('Could not parse JSON from subordinate_config '
- 'setting from %s' % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_config on %s but it contained'
- 'nothing for %s service' % (rid, service),
- level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_config on %s but it contained'
- 'nothing for %s' % (rid, self.config_file),
- level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- @property
- def num_cpus(self):
- try:
- from psutil import NUM_CPUS
- except ImportError:
- apt_install('python-psutil', fatal=True)
- from psutil import NUM_CPUS
-
- return NUM_CPUS
-
- def __call__(self):
- multiplier = config('worker-multiplier') or 0
- ctxt = {"workers": self.num_cpus * multiplier}
- return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects current neutron-plugin-api relation for neutron settings. Return
- defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {bridge: normalized[port] for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.values():
- ports = mappings.values()
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt["devs"] = '\\n'.join(ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
deleted file mode 100755
index eb8527f..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-CRITICAL=0
-NOTACTIVE=''
-LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
-do
- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
- if [ $? != 0 ]; then
- date >> $LOGFILE
- echo $output >> $LOGFILE
- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
- CRITICAL=1
- NOTACTIVE="${NOTACTIVE} $appserver"
- fi
-done
-
-if [ $CRITICAL = 1 ]; then
- echo "CRITICAL:${NOTACTIVE}"
- exit 2
-fi
-
-echo "OK: All haproxy instances looking good"
-exit 0
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
deleted file mode 100755
index 3ebb532..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-# These should be config options at some stage
-CURRQthrsh=0
-MAXQthrsh=100
-
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
-
-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
-do
- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
-
- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
- exit 2
- fi
-done
-
-echo "OK: All haproxy queue depths looking good"
-exit 0
-
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/ip.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 3dca6dc..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured.
-
- :param endpoint_type: Network endpoing type
- """
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- clustered = is_clustered()
- if clustered:
- if not net_addr:
- # If no net-splits defined, we expect a single vip
- resolved_address = vips[0]
- else:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- resolved_address = get_address_in_network(net_addr, fallback_addr)
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 55b2037..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,356 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return ['openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [['plumgrid-lxc'],
- ['iovisor-dkms']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of port:bridge mappings.
-
- Returns dict of the form {port:bridge} where port may be an mac address or
- interface name.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index b99851c..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[global]
-{% if auth -%}
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
-
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/git.upstart
deleted file mode 100644
index 4bed404..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/git.upstart
+++ /dev/null
@@ -1,17 +0,0 @@
-description "{{ service_description }}"
-author "Juju {{ service_name }} Charm <juju@localhost>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid {{ user_name }} \
- --chdir {{ start_dir }} --name {{ process_name }} \
- --exec {{ executable_name }} -- \
- {% for config_file in config_files -%}
- --config-file={{ config_file }} \
- {% endfor -%}
- {% if log_file -%}
- --log-file={{ log_file }}
- {% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
deleted file mode 100644
index ad875f1..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ /dev/null
@@ -1,58 +0,0 @@
-global
- log {{ local_host }} local0
- log {{ local_host }} local1 notice
- maxconn 20000
- user haproxy
- group haproxy
- spread-checks 0
-
-defaults
- log global
- mode tcp
- option tcplog
- option dontlognull
- retries 3
- timeout queue 1000
- timeout connect 1000
-{% if haproxy_client_timeout -%}
- timeout client {{ haproxy_client_timeout }}
-{% else -%}
- timeout client 30000
-{% endif -%}
-
-{% if haproxy_server_timeout -%}
- timeout server {{ haproxy_server_timeout }}
-{% else -%}
- timeout server 30000
-{% endif -%}
-
-listen stats {{ stat_port }}
- mode http
- stats enable
- stats hide-version
- stats realm Haproxy\ Statistics
- stats uri /
- stats auth admin:password
-
-{% if frontends -%}
-{% for service, ports in service_ports.items() -%}
-frontend tcp-in_{{ service }}
- bind *:{{ ports[0] }}
- {% if ipv6 -%}
- bind :::{{ ports[0] }}
- {% endif -%}
- {% for frontend in frontends -%}
- acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
- use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
- {% endfor -%}
- default_backend {{ service }}_{{ default_backend }}
-
-{% for frontend in frontends -%}
-backend {{ service }}_{{ frontend }}
- balance leastconn
- {% for unit, address in frontends[frontend]['backends'].items() -%}
- server {{ unit }} {{ address }}:{{ ports[1] }} check
- {% endfor %}
-{% endfor -%}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
deleted file mode 100644
index ce28fa3..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
+++ /dev/null
@@ -1,24 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
deleted file mode 100644
index ce28fa3..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
deleted file mode 100644
index 2a37edd..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
deleted file mode 100644
index b444c9c..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-[oslo_messaging_rabbit]
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
deleted file mode 100644
index 95f1a76..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
+++ /dev/null
@@ -1,14 +0,0 @@
-{% if zmq_host -%}
-# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
-rpc_backend = zmq
-rpc_zmq_host = {{ zmq_host }}
-{% if zmq_redis_address -%}
-rpc_zmq_matchmaker = redis
-matchmaker_heartbeat_freq = 15
-matchmaker_heartbeat_ttl = 30
-[matchmaker_redis]
-host = {{ zmq_redis_address }}
-{% else -%}
-rpc_zmq_matchmaker = ring
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templating.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index e5e3cb1..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/utils.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 24b998d..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,926 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-
-import six
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- config,
- log as juju_log,
- charm_dir,
- INFO,
- relation_ids,
- relation_set,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
-])
-
-# The ugly duckling
-SWIFT_CODENAMES = OrderedDict([
- ('1.4.3', 'diablo'),
- ('1.4.8', 'essex'),
- ('1.7.4', 'folsom'),
- ('1.8.0', 'grizzly'),
- ('1.7.7', 'grizzly'),
- ('1.7.6', 'grizzly'),
- ('1.10.0', 'havana'),
- ('1.9.1', 'havana'),
- ('1.9.0', 'havana'),
- ('1.13.1', 'icehouse'),
- ('1.13.0', 'icehouse'),
- ('1.12.0', 'icehouse'),
- ('1.11.0', 'icehouse'),
- ('2.0.0', 'juno'),
- ('2.1.0', 'juno'),
- ('2.2.0', 'juno'),
- ('2.2.1', 'kilo'),
- ('2.2.2', 'kilo'),
- ('2.3.0', 'liberty'),
- ('2.4.0', 'liberty'),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12.0.0', 'liberty'),
- ]),
- 'neutron-common': OrderedDict([
- ('7.0.0', 'liberty'),
- ]),
- 'cinder-common': OrderedDict([
- ('7.0.0', 'liberty'),
- ]),
- 'keystone': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
- 'horizon-common': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5.0.0', 'liberty'),
- ]),
- 'heat-common': OrderedDict([
- ('5.0.0', 'liberty'),
- ]),
- 'glance-common': OrderedDict([
- ('11.0.0', 'liberty'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- if match:
- vers = match.group(0)
-
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- swift_vers = vers[:5]
- if swift_vers not in SWIFT_CODENAMES:
- # Deal with 1.10.0 upward
- swift_vers = vers[:6]
- return SWIFT_CODENAMES[swift_vers]
- else:
- vers = vers[:6]
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- else:
- vers_map = OPENSTACK_CODENAMES
-
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
-def import_key(keyid):
- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
- "--recv-keys %s" % keyid
- try:
- subprocess.check_call(cmd.split(' '))
- except subprocess.CalledProcessError:
- error_out("Error importing repo key %s" % keyid)
-
-
-def configure_installation_source(rel):
- '''Configure apt installation source.'''
- if rel == 'distro':
- return
- elif rel == 'distro-proposed':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(DISTRO_PROPOSED % ubuntu_rel)
- elif rel[:4] == "ppa:":
- src = rel
- subprocess.check_call(["add-apt-repository", "-y", src])
- elif rel[:3] == "deb":
- l = len(rel.split('|'))
- if l == 2:
- src, key = rel.split('|')
- juju_log("Importing PPA key from keyserver for %s" % src)
- import_key(key)
- elif l == 1:
- src = rel
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(src)
- elif rel[:6] == 'cloud:':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = rel.split(':')[1]
- u_rel = rel.split('-')[0]
- ca_rel = rel.split('-')[1]
-
- if u_rel != ubuntu_rel:
- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
- 'version (%s)' % (ca_rel, ubuntu_rel)
- error_out(e)
-
- if 'staging' in ca_rel:
- # staging is just a regular PPA.
- os_rel = ca_rel.split('/')[0]
- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
- cmd = 'add-apt-repository -y %s' % ppa
- subprocess.check_call(cmd.split(' '))
- return
-
- # map charm config options to actual archive pockets.
- pockets = {
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- }
-
- try:
- pocket = pockets[ca_rel]
- except KeyError:
- e = 'Invalid Cloud Archive release specified: %s' % rel
- error_out(e)
-
- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
- apt_install('ubuntu-cloud-keyring', fatal=True)
-
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
- f.write(src)
- else:
- error_out("Invalid openstack-release specified: %s" % rel)
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- [rc_script.write('export %s=%s\n' % (u, p))
- for u, p in six.iteritems(env_vars) if u != "script_path"]
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: : Returns True if configured installation source offers
- a newer version of package.
-
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
- else:
- available_vers = get_os_version_install_source(src)
- apt.init()
- return apt.version_compare(available_vers, cur_vers) == 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('prepare_storage(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
-
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-requirements_dir = None
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-def git_clone_and_install(projects_yaml, core_project, depth=1):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements):
- """
- Clone and install a single git repository.
- """
- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
-
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- if not os.path.exists(dest_dir):
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
- depth=depth)
- else:
- repo_dir = dest_dir
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv)
- else:
- pip_install(repo_dir, venv=venv)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Set workload status based on complete contexts.
- status-set missing or incomplete contexts
- and juju-log details of missing required data.
- charm_func is a charm specific function to run checking
- for charm specific requirements such as a VIP setting.
- """
- incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
- state = 'active'
- missing_relations = []
- incomplete_relations = []
- message = None
- charm_state = None
- charm_message = None
-
- for generic_interface in incomplete_rel_data.keys():
- related_interface = None
- missing_data = {}
- # Related or not?
- for interface in incomplete_rel_data[generic_interface]:
- if incomplete_rel_data[generic_interface][interface].get('related'):
- related_interface = interface
- missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
- # No relation ID for the generic_interface
- if not related_interface:
- juju_log("{} relation is missing and must be related for "
- "functionality. ".format(generic_interface), 'WARN')
- state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
- else:
- # Relation ID exists but no related unit
- if not missing_data:
- # Edge case relation ID exists but departing
- if ('departed' in hook_name() or 'broken' in hook_name()) \
- and related_interface in hook_name():
- state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
- juju_log("{} relation's interface, {}, "
- "relationship is departed or broken "
- "and is required for functionality."
- "".format(generic_interface, related_interface), "WARN")
- # Normal case relation ID exists but no related unit
- # (joining)
- else:
- juju_log("{} relations's interface, {}, is related but has "
- "no units in the relation."
- "".format(generic_interface, related_interface), "INFO")
- # Related unit exists and data missing on the relation
- else:
- juju_log("{} relation's interface, {}, is related awaiting "
- "the following data from the relationship: {}. "
- "".format(generic_interface, related_interface,
- ", ".join(missing_data)), "INFO")
- if state != 'blocked':
- state = 'waiting'
- if generic_interface not in incomplete_relations \
- and generic_interface not in missing_relations:
- incomplete_relations.append(generic_interface)
-
- if missing_relations:
- message = "Missing relations: {}".format(", ".join(missing_relations))
- if incomplete_relations:
- message += "; incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'blocked'
- elif incomplete_relations:
- message = "Incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'waiting'
-
- # Run charm specific checks
- if charm_func:
- charm_state, charm_message = charm_func(configs)
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- message = "{} {}".format(message, charm_message)
- else:
- message = charm_message
-
- # Set to active if all requirements have been met
- if state == 'active':
- message = "Unit is ready"
- juju_log(message, "INFO")
-
- status_set(state, message)
-
-
-def workload_state_compare(current_workload_state, workload_state):
- """ Return highest priority of two states"""
- hierarchy = {'unknown': -1,
- 'active': 0,
- 'maintenance': 1,
- 'waiting': 2,
- 'blocked': 3,
- }
-
- if hierarchy.get(workload_state) is None:
- workload_state = 'unknown'
- if hierarchy.get(current_workload_state) is None:
- current_workload_state = 'unknown'
-
- # Set workload_state based on hierarchy of statuses
- if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
- return current_workload_state
- else:
- return workload_state
-
-
-def incomplete_relation_data(configs, required_interfaces):
- """
- Check complete contexts against required_interfaces
- Return dictionary of incomplete relation data.
-
- configs is an OSConfigRenderer object with configs registered
-
- required_interfaces is a dictionary of required general interfaces
- with dictionary values of possible specific interfaces.
- Example:
- required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
- The interface is said to be satisfied if anyone of the interfaces in the
- list has a complete context.
-
- Return dictionary of incomplete or missing required contexts with relation
- status of interfaces and any missing data points. Example:
- {'message':
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}},
- 'identity':
- {'identity-service': {'related': False}},
- 'database':
- {'pgsql-db': {'related': False},
- 'shared-db': {'related': True}}}
- """
- complete_ctxts = configs.complete_contexts()
- incomplete_relations = []
- for svc_type in required_interfaces.keys():
- # Avoid duplicates
- found_ctxt = False
- for interface in required_interfaces[svc_type]:
- if interface in complete_ctxts:
- found_ctxt = True
- if not found_ctxt:
- incomplete_relations.append(svc_type)
- incomplete_context_data = {}
- for i in incomplete_relations:
- incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
- return incomplete_context_data
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/debug.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/debug.py
deleted file mode 100644
index 871cd6f..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/debug.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import atexit
-import sys
-
-from charmhelpers.contrib.python.rpdb import Rpdb
-from charmhelpers.core.hookenv import (
- open_port,
- close_port,
- ERROR,
- log
-)
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-DEFAULT_ADDR = "0.0.0.0"
-DEFAULT_PORT = 4444
-
-
-def _error(message):
- log(message, level=ERROR)
-
-
-def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
- """
- Set a trace point using the remote debugger
- """
- atexit.register(close_port, port)
- try:
- log("Starting a remote python debugger session on %s:%s" % (addr,
- port))
- open_port(port)
- debugger = Rpdb(addr=addr, port=port)
- debugger.set_trace(sys._getframe().f_back)
- except:
- _error("Cannot start a remote debug session on %s:%s" % (addr,
- port))
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/packages.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index 10b32e3..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-try:
- from pip import main as pip_execute
-except ImportError:
- apt_update()
- apt_install('python-pip')
- from pip import main as pip_execute
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, **options):
- """Install a requirements file """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- apt_install('python-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/rpdb.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/rpdb.py
deleted file mode 100644
index d503f88..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/rpdb.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Remote Python Debugger (pdb wrapper)."""
-
-import pdb
-import socket
-import sys
-
-__author__ = "Bertrand Janin <b@janin.com>"
-__version__ = "0.1.3"
-
-
-class Rpdb(pdb.Pdb):
-
- def __init__(self, addr="127.0.0.1", port=4444):
- """Initialize the socket and initialize pdb."""
-
- # Backup stdin and stdout before replacing them by the socket handle
- self.old_stdout = sys.stdout
- self.old_stdin = sys.stdin
-
- # Open a 'reusable' socket to let the webapp reload on the same port
- self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
- self.skt.bind((addr, port))
- self.skt.listen(1)
- (clientsocket, address) = self.skt.accept()
- handle = clientsocket.makefile('rw')
- pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
- sys.stdout = sys.stdin = handle
-
- def shutdown(self):
- """Revert stdin and stdout, close the socket."""
- sys.stdout = self.old_stdout
- sys.stdin = self.old_stdin
- self.skt.close()
- self.set_continue()
-
- def do_continue(self, arg):
- """Stop all operation on ``continue``."""
- self.shutdown()
- return 1
-
- do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/version.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/version.py
deleted file mode 100644
index c39fcbf..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/python/version.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def current_version():
- """Current system python version"""
- return sys.version_info
-
-
-def current_version_string():
- """Current system python version as string major.minor.micro"""
- return "{0}.{1}.{2}".format(sys.version_info.major,
- sys.version_info.minor,
- sys.version_info.micro)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index 83f264d..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,657 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-
-from charmhelpers.core.kernel import modprobe
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def get_osds(service):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
- """
- version = ceph_version()
- if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
-
- return None
-
-
-def create_pool(service, name, replicas=3):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pgnum = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pgnum = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
- check_call(cmd)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
- str(replicas)]
- check_call(cmd)
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted ring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes():
- """Query named relation 'ceph' to determine current nodes."""
- hosts = []
- for r_id in relation_ids('ceph'):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- create_keyring(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = os.errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
- blk_device, fstype, system_services=[],
- replicas=3):
- """NOTE: This function must only be called from a single service unit for
- the same rbd_img otherwise data loss will occur.
-
- Ensures given pool and RBD image exists, is mapped to a block device,
- and the device is formatted and mounted at the given mount_point.
-
- If formatting a device for the first time, data existing at mount_point
- will be migrated to the RBD device before being re-mounted.
-
- All services listed in system_services will be stopped prior to data
- migration and restarted when complete.
- """
- # Ensure pool, RBD image, RBD mappings are in place.
- if not pool_exists(service, pool):
- log('Creating new pool {}.'.format(pool), level=INFO)
- create_pool(service, pool, replicas=replicas)
-
- if not rbd_exists(service, pool, rbd_img):
- log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
- create_rbd_image(service, pool, rbd_img, sizemb)
-
- if not image_mapped(rbd_img):
- log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
- level=INFO)
- map_block_storage(service, pool, rbd_img)
-
- # make file system
- # TODO: What happens if for whatever reason this is run again and
- # the data is already in the rbd device and/or is mounted??
- # When it is mounted already, it will fail to make the fs
- # XXX: This is really sketchy! Need to at least add an fstab entry
- # otherwise this hook will blow away existing data if its executed
- # after a reboot.
- if not filesystem_mounted(mount_point):
- make_filesystem(blk_device, fstype)
-
- for svc in system_services:
- if service_running(svc):
- log('Stopping services {} prior to migrating data.'
- .format(svc), level=DEBUG)
- service_stop(svc)
-
- place_data_on_block_device(blk_device, mount_point)
-
- for svc in system_services:
- log('Starting service {} after migrating data.'
- .format(svc), level=DEBUG)
- service_start(svc)
-
-
-def ensure_ceph_keyring(service, user=None, group=None):
- """Ensures a ceph keyring is created for a named service and optionally
- ensures user and group ownership.
-
- Returns False if no ceph key is available in relation state.
- """
- key = None
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
-
- if not key:
- return False
-
- create_keyring(service=service, key=key)
- keyring = _keyring_path(service)
- if user and group:
- check_call(['chown', '%s.%s' % (user, group), keyring])
-
- return True
-
-
-def ceph_version():
- """Retrieve the local version of ceph."""
- if os.path.exists('/usr/bin/ceph'):
- cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
- output = output.split()
- if len(output) > 3:
- return output[2]
- else:
- return None
- else:
- return None
-
-
-class CephBrokerRq(object):
- """Ceph broker request.
-
- Multiple operations can be added to a request and sent to the Ceph broker
- to be executed.
-
- Request is json-encoded for sending over the wire.
-
- The API is versioned and defaults to version 1.
- """
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
- else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
-
- def add_op_create_pool(self, name, replica_count=3):
- self.ops.append({'op': 'create-pool', 'name': name,
- 'replicas': replica_count})
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op']:
- if self.ops[req_no][key] != other.ops[req_no][key]:
- return False
- else:
- return False
- return True
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.api_version == other.api_version and \
- self._ops_equal(other):
- return True
- else:
- return False
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identity which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <Request complete actions>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- @param rid: Relation id to query for request
- """
- request = None
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- request_data = json.loads(broker_req)
- request = CephBrokerRq(api_version=request_data['api-version'],
- request_id=request_data['request-id'])
- request.set_ops(request_data['ops'])
-
- return request
-
-
-def get_request_states(request):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
-
- @param request: A CephBrokerRq object
- """
- complete = []
- requests = {}
- for rid in relation_ids('ceph'):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similair request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similair request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
- log('Using legacy broker_rsp as remote service does not '
- 'supports unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids('ceph'):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index c296f09..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
- '''
- Parse through 'losetup -a' output to determine currently mapped
- loopback devices. Output is expected to look like:
-
- /dev/loop0: [0807]:961814 (/tmp/my.img)
-
- :returns: dict: a dict mapping {loopback_dev: backing_file}
- '''
- loopbacks = {}
- cmd = ['losetup', '-a']
- devs = [d.strip().split(' ') for d in
- check_output(cmd).splitlines() if d != '']
- for dev, _, f in devs:
- loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
- return loopbacks
-
-
-def create_loopback(file_path):
- '''
- Create a loopback device for a given backing file.
-
- :returns: str: Full path to new loopback device (eg, /dev/loop0)
- '''
- file_path = os.path.abspath(file_path)
- check_call(['losetup', '--find', file_path])
- for d, f in six.iteritems(loopback_devices()):
- if f == file_path:
- return d
-
-
-def ensure_loopback_device(path, size):
- '''
- Ensure a loopback device exists for a given backing file path and size.
- If it a loopback device is not mapped to file, a new one will be created.
-
- TODO: Confirm size of found loopback device.
-
- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
- '''
- for d, f in six.iteritems(loopback_devices()):
- if f == path:
- return d
-
- if not os.path.exists(path):
- cmd = ['truncate', '--size', size, path]
- check_call(cmd)
-
- return create_loopback(path)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index 34b5f71..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
- '''
- Deactivate any volume gruop associated with an LVM physical volume.
-
- :param block_device: str: Full path to LVM physical volume
- '''
- vg = list_lvm_volume_group(block_device)
- if vg:
- cmd = ['vgchange', '-an', vg]
- check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
- '''
- Determine whether a block device is initialized as an LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: boolean: True if block device is a PV, False if not.
- '''
- try:
- check_output(['pvdisplay', block_device])
- return True
- except CalledProcessError:
- return False
-
-
-def remove_lvm_physical_volume(block_device):
- '''
- Remove LVM PV signatures from a given block device.
-
- :param block_device: str: Full path of block device to scrub.
- '''
- p = Popen(['pvremove', '-ff', block_device],
- stdin=PIPE)
- p.communicate(input='y\n')
-
-
-def list_lvm_volume_group(block_device):
- '''
- List LVM volume group associated with a given block device.
-
- Assumes block device is a valid LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: str: Name of volume group associated with block device or None
- '''
- vg = None
- pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
- return vg
-
-
-def create_lvm_physical_volume(block_device):
- '''
- Initialize a block device as an LVM physical volume.
-
- :param block_device: str: Full path of block device to initialize.
-
- '''
- check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
- '''
- Create an LVM volume group backed by a given block device.
-
- Assumes block device has already been initialized as an LVM PV.
-
- :param volume_group: str: Name of volume group to create.
- :block_device: str: Full path of PV-initialized block device.
- '''
- check_call(['vgcreate', volume_group, block_device])
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index 1e57941..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- check_call,
- check_output,
- call
-)
-
-
-def is_block_device(path):
- '''
- Confirm device at path is a valid block device node.
-
- :returns: boolean: True if path is a block device, False if not.
- '''
- if not os.path.exists(path):
- return False
- return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
- '''
- Clear a block device of partition table. Relies on sgdisk, which is
- installed as pat of the 'gdisk' package in Ubuntu.
-
- :param block_device: str: Full path of block device to clean.
- '''
- # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
- call(['sgdisk', '--zap-all', '--', block_device])
- call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
- dev_end = check_output(['blockdev', '--getsz',
- block_device]).decode('UTF-8')
- gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
- '''Given a device path, return True if that device is mounted, and False
- if it isn't.
-
- :param device: str: Full path of the device to check.
- :returns: boolean: True if the path represents a mounted device, False if
- it doesn't.
- '''
- is_partition = bool(re.search(r".*[0-9]+\b", device))
- out = check_output(['mount']).decode('UTF-8')
- if is_partition:
- return bool(re.search(device + r"\b", out))
- return bool(re.search(device + r"[0-9]*\b", out))
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/decorators.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/files.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/fstab.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/hookenv.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index ab53a78..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,898 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peer'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peer``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peer'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/host.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/host.py
deleted file mode 100644
index cb3c527..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/hugepage.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 4aaca3f..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/kernel.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/base.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3f67783..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/strutils.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/sysctl.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/templating.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/unitdata.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index efd7f9f..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f023b26..0000000
--- a/charms/trusty/contrail-configuration/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-configuration/hooks/config-changed b/charms/trusty/contrail-configuration/hooks/config-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-broken b/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-broken
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-changed b/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-departed b/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-departed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail-analytics-api-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail-api-relation-joined b/charms/trusty/contrail-configuration/hooks/contrail-api-relation-joined
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail-api-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail-discovery-relation-joined b/charms/trusty/contrail-configuration/hooks/contrail-discovery-relation-joined
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail-discovery-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail-ifmap-relation-joined b/charms/trusty/contrail-configuration/hooks/contrail-ifmap-relation-joined
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail-ifmap-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/contrail_configuration_hooks.py b/charms/trusty/contrail-configuration/hooks/contrail_configuration_hooks.py
deleted file mode 100755
index 2c23b2a..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail_configuration_hooks.py
+++ /dev/null
@@ -1,478 +0,0 @@
-#!/usr/bin/env python
-
-from socket import gethostbyname
-import sys
-
-from apt_pkg import version_compare
-import json
-import urllib2
-import yaml
-
-from charmhelpers.contrib.openstack.utils import configure_installation_source
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- is_leader,
- leader_get,
- leader_set,
- local_unit,
- log,
- related_units,
- relation_get,
- relation_id,
- relation_ids,
- relation_set,
- remote_unit,
- unit_get
-)
-
-from charmhelpers.core.host import (
- pwgen,
- restart_on_change,
- service_restart
-)
-
-from charmhelpers.fetch import (
- apt_install,
- apt_upgrade,
- configure_sources
-)
-
-from contrail_configuration_utils import (
- CONTRAIL_VERSION,
- api_port,
- cassandra_units,
- check_url,
- contrail_floating_ip_create,
- contrail_floating_ip_deactivate,
- contrail_floating_ip_delete,
- contrail_floating_ip_use,
- discovery_port,
- dpkg_version,
- fix_services,
- provision_configuration,
- provision_metadata,
- units,
- unprovision_configuration,
- unprovision_metadata,
- write_barbican_auth_config,
- write_contrail_api_config,
- write_contrail_schema_config,
- write_contrail_svc_monitor_config,
- write_device_manager_config,
- write_discovery_config,
- write_ifmap_config,
- write_nodemgr_config,
- write_vnc_api_config
-)
-
-PACKAGES = [ "ifmap-server", "contrail-config", "contrail-config-openstack",
- "neutron-common", "contrail-utils", "contrail-nodemgr" ]
-
-PACKAGES_BARBICAN = [ "python-barbicanclient" ]
-
-hooks = Hooks()
-config = config()
-
-def add_contrail_api():
- # check relation dependencies
- if not config_get("contrail-api-configured") \
- and config_get("amqp-ready") \
- and config_get("cassandra-ready") \
- and config_get("identity-admin-ready") \
- and config_get("zookeeper-ready"):
- api_p = api_port()
- port = str(api_p)
- try:
- # wait until api is up
- check_url("http://localhost:" + port)
- except urllib2.URLError:
- log("contrail-api service has failed to start correctly on port {}".format(port),
- "CRITICAL")
- log("This is typically due to a runtime error in related services",
- "CRITICAL")
- raise
- # provision configuration on 3.0.2.0+
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- provision_configuration()
- config["contrail-api-configured"] = True
-
- # inform relations
- for rid in relation_ids("contrail-api"):
- relation_set(relation_id=rid, port=api_p, vip=config.get("vip"))
-
- configure_floating_ip_pools()
-
-def add_metadata():
- # check relation dependencies
- if is_leader() \
- and not leader_get("metadata-provisioned") \
- and config_get("contrail-api-configured") \
- and config_get("neutron-metadata-ready"):
- provision_metadata()
- leader_set({"metadata-provisioned": True})
-
-@hooks.hook("amqp-relation-changed")
-def amqp_changed():
- if not relation_get("password"):
- log("Relation not ready")
- return
- amqp_relation()
- config["amqp-ready"] = True
- add_contrail_api()
- add_metadata()
-
-@hooks.hook("amqp-relation-departed")
-@hooks.hook("amqp-relation-broken")
-def amqp_departed():
- if not units("amqp"):
- remove_metadata()
- remove_contrail_api()
- config["amqp-ready"] = False
- amqp_relation()
-
-@restart_on_change({"/etc/contrail/contrail-api.conf": ["supervisor-config"],
- "/etc/contrail/contrail-device-manager.conf": ["supervisor-config"],
- "/etc/contrail/contrail-schema.conf": ["supervisor-config"],
- "/etc/contrail/contrail-svc-monitor.conf": ["supervisor-config"]})
-def amqp_relation():
- write_contrail_api_config()
- write_contrail_svc_monitor_config()
- write_device_manager_config()
- if version_compare(CONTRAIL_VERSION, "3.0") >= 0:
- write_contrail_schema_config()
-
-@hooks.hook("amqp-relation-joined")
-def amqp_joined():
- relation_set(username="contrail", vhost="contrail")
-
-@hooks.hook("cassandra-relation-changed")
-def cassandra_changed():
- # 'port' is used in legacy precise charm
- if not relation_get("rpc_port") and not relation_get("port"):
- log("Relation not ready")
- return
- if not config.get("cassandra-ready"):
- units = len(cassandra_units())
- required = config["cassandra-units"]
- if units < required:
- log("{} cassandra unit(s) ready, require {} more".format(units, required - units))
- return
- config["cassandra-ready"] = True
- cassandra_relation()
- add_contrail_api()
- add_metadata()
-
-@hooks.hook("cassandra-relation-departed")
-@hooks.hook("cassandra-relation-broken")
-def cassandra_departed():
- if not units("cassandra"):
- remove_metadata()
- remove_contrail_api()
- config["cassandra-ready"] = False
- cassandra_relation()
-
-@restart_on_change({"/etc/contrail/contrail-api.conf": ["supervisor-config"],
- "/etc/contrail/contrail-device-manager.conf": ["supervisor-config"],
- "/etc/contrail/contrail-discovery.conf": ["supervisor-config"],
- "/etc/contrail/contrail-schema.conf": ["supervisor-config"],
- "/etc/contrail/contrail-svc-monitor.conf": ["supervisor-config"],
- "/etc/contrail/discovery.conf": ["supervisor-config"]})
-def cassandra_relation():
- write_contrail_api_config()
- write_contrail_schema_config()
- write_discovery_config()
- write_contrail_svc_monitor_config()
- write_device_manager_config()
-
-@hooks.hook("config-changed")
-def config_changed():
- if config_get("contrail-api-configured"):
- configure_floating_ip_pools()
- vip = config.get("vip")
- for rid in relation_ids("contrail-api"):
- relation_set(relation_id=rid, vip=vip)
- for rid in relation_ids("contrail-discovery"):
- relation_set(relation_id=rid, vip=vip)
-
-def config_get(key):
- try:
- return config[key]
- except KeyError:
- return None
-
-def configure_floating_ip_pools():
- if is_leader():
- floating_pools = config.get("floating-ip-pools")
- previous_floating_pools = leader_get("floating-ip-pools")
- if floating_pools != previous_floating_pools:
- # create/destroy pools, activate/deactivate projects
- # according to new value
- pools = { (pool["project"],
- pool["network"],
- pool["pool-name"]): set(pool["target-projects"])
- for pool in yaml.safe_load(floating_pools) } \
- if floating_pools else {}
- previous_pools = {}
- if previous_floating_pools:
- for pool in yaml.safe_load(previous_floating_pools):
- projects = pool["target-projects"]
- name = (pool["project"], pool["network"], pool["pool-name"])
- if name in pools:
- previous_pools[name] = set(projects)
- else:
- floating_ip_pool_delete(name, projects)
- for name, projects in pools.iteritems():
- if name not in previous_pools:
- floating_ip_pool_create(name, projects)
- else:
- floating_ip_pool_update(name, projects, previous_pools[name])
-
- leader_set({"floating-ip-pools": floating_pools})
-
-@hooks.hook("contrail-analytics-api-relation-changed")
-def contrail_analytics_api_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- contrail_analytics_api_relation()
-
-@hooks.hook("contrail-analytics-api-relation-departed")
-@hooks.hook("contrail-analytics-api-relation-broken")
-@restart_on_change({"/etc/contrail/contrail-svc-monitor.conf": ["supervisor-config"]})
-def contrail_analytics_api_relation():
- write_contrail_svc_monitor_config()
-
-@hooks.hook("contrail-api-relation-joined")
-def contrail_api_joined():
- if config_get("contrail-api-configured"):
- relation_set(port=api_port(), vip=config.get("vip"))
-
-@hooks.hook("contrail-discovery-relation-joined")
-def contrail_discovery_joined():
- relation_set(port=discovery_port(), vip=config.get("vip"))
-
-@hooks.hook("contrail-ifmap-relation-joined")
-def contrail_ifmap_joined():
- if is_leader():
- creds = leader_get("ifmap-creds")
- creds = json.loads(creds) if creds else {}
-
- # prune credentials because we can't remove them directly lp #1469731
- creds = { rid: { unit: units[unit]
- for unit, units in
- ((unit, creds[rid]) for unit in related_units(rid))
- if unit in units }
- for rid in relation_ids("contrail-ifmap")
- if rid in creds }
-
- rid = relation_id()
- if rid not in creds:
- creds[rid] = {}
- cs = creds[rid]
- unit = remote_unit()
- if unit in cs:
- return
- # generate new credentials for unit
- cs[unit] = { "username": unit, "password": pwgen(32) }
- leader_set({"ifmap-creds": json.dumps(creds)})
- write_ifmap_config()
- service_restart("supervisor-config")
- relation_set(creds=json.dumps(cs))
-
-def floating_ip_pool_create(name, projects):
- # create pool
- fq_network = "default-domain:" + ":".join(name[:2])
- contrail_floating_ip_create(fq_network, name[2])
-
- # activate pool for projects
- fq_pool_name = "default-domain:" + ":".join(name)
- for project in projects:
- fq_project = "default-domain:" + project
- contrail_floating_ip_use(fq_project, fq_pool_name)
-
-def floating_ip_pool_delete(name, projects):
- # deactivate pool for projects
- fq_pool_name = "default-domain:" + ":".join(name)
- for project in projects:
- fq_project = "default-domain:" + project
- contrail_floating_ip_deactivate(fq_project, fq_pool_name)
-
- # delete pool
- fq_network = "default-domain:" + ":".join(name[:2])
- contrail_floating_ip_delete(fq_network, name[2])
-
-def floating_ip_pool_update(name, projects, previous_projects):
- fq_pool_name = "default-domain:" + ":".join(name)
-
- # deactivate pool for projects
- for project in (previous_projects - projects):
- fq_project = "default-domain:" + project
- contrail_floating_ip_deactivate(fq_project, fq_pool_name)
-
- # activate pool for projects
- for project in (projects - previous_projects):
- fq_project = "default-domain:" + project
- contrail_floating_ip_use(fq_project, fq_pool_name)
-
-@hooks.hook("http-services-relation-joined")
-def http_services_joined():
- name = local_unit().replace("/", "-")
- addr = gethostbyname(unit_get("private-address"))
- services = [ { "service_name": "contrail-api",
- "service_host": "0.0.0.0",
- "service_port": 8082,
- "service_options": [ "mode http", "balance leastconn", "option httpchk GET /Snh_SandeshUVECacheReq?x=NodeStatus HTTP/1.0" ],
- "servers": [ [ name, addr, api_port(), "check port 8084" ] ] },
- { "service_name": "contrail-discovery",
- "service_host": "0.0.0.0",
- "service_port": 5998,
- "service_options": [ "mode http", "balance leastconn", "option httpchk GET /services HTTP/1.0" ],
- "servers": [ [ name, addr, discovery_port(), "check" ] ] } ]
- relation_set(services=yaml.dump(services))
-
-@hooks.hook("identity-admin-relation-changed")
-def identity_admin_changed():
- if not relation_get("service_hostname"):
- log("Relation not ready")
- return
- identity_admin_relation()
- config["identity-admin-ready"] = True
- add_contrail_api()
- add_metadata()
-
-@hooks.hook("identity-admin-relation-departed")
-@hooks.hook("identity-admin-relation-broken")
-def identity_admin_departed():
- if not units("identity-admin"):
- remove_metadata()
- remove_contrail_api()
- config["identity-admin-ready"] = False
- identity_admin_relation()
-
-@restart_on_change({"/etc/contrail/contrail-api.conf": ["supervisor-config"],
- "/etc/contrail/contrail-device-manager.conf": ["supervisor-config"],
- "/etc/contrail/contrail-schema.conf": ["supervisor-config"],
- "/etc/contrail/contrail-svc-monitor.conf": ["supervisor-config"]})
-def identity_admin_relation():
- write_contrail_api_config()
- write_contrail_schema_config()
- write_contrail_svc_monitor_config()
- write_device_manager_config()
- write_vnc_api_config()
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- write_barbican_auth_config()
-
-@hooks.hook()
-def install():
- configure_installation_source(config["openstack-origin"])
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- apt_install(PACKAGES, fatal=True)
-
- contrail_version = dpkg_version("contrail-config")
- openstack_version = dpkg_version("neutron-common")
- if version_compare(contrail_version, "3.0.2.0-34") >= 0 \
- and version_compare(openstack_version, "2:7.0.0") >= 0:
- # install barbican packages
- apt_install(PACKAGES_BARBICAN, fatal=True)
-
- fix_services()
- write_nodemgr_config()
- service_restart("contrail-config-nodemgr")
-
-@hooks.hook("leader-settings-changed")
-@restart_on_change({"/etc/ifmap-server/basicauthusers.properties": ["supervisor-config"]})
-def leader_changed():
- write_ifmap_config()
- creds = leader_get("ifmap-creds")
- creds = json.loads(creds) if creds else {}
- # set same credentials on relation
- for rid in relation_ids("contrail-ifmap"):
- if rid in creds:
- relation_set(relation_id=rid, creds=json.dumps(creds[rid]))
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
-
-@hooks.hook("neutron-metadata-relation-changed")
-def neutron_metadata_changed():
- if not relation_get("shared-secret"):
- log("Relation not ready")
- return
- config["neutron-metadata-ready"] = True
- add_metadata()
-
-@hooks.hook("neutron-metadata-relation-departed")
-@hooks.hook("neutron-metadata-relation-broken")
-def neutron_metadata_departed():
- if not units("neutron-metadata"):
- remove_metadata()
- config["neutron-metadata-ready"] = False
-
-def remove_contrail_api():
- if config_get("contrail-api-configured"):
- # unprovision configuration on 3.0.2.0+
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- unprovision_configuration()
- config["contrail-api-configured"] = False
-
-def remove_metadata():
- if is_leader() and leader_get("metadata-provisioned"):
- # impossible to know if current hook is firing because
- # relation or leader is being removed lp #1469731
- if not relation_ids("cluster"):
- unprovision_metadata()
- leader_set({"metadata-provisioned": ""})
-
-@hooks.hook("upgrade-charm")
-def upgrade_charm():
- write_ifmap_config()
- write_contrail_api_config()
- write_contrail_schema_config()
- write_discovery_config()
- write_contrail_svc_monitor_config()
- write_device_manager_config()
- write_vnc_api_config()
- write_nodemgr_config()
- service_restart("supervisor-config")
-
-@hooks.hook("zookeeper-relation-changed")
-def zookeeper_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- zookeeper_relation()
- config["zookeeper-ready"] = True
- add_contrail_api()
- add_metadata()
-
-@hooks.hook("zookeeper-relation-departed")
-@hooks.hook("zookeeper-relation-broken")
-def zookeeper_departed():
- if not units("zookeeper"):
- remove_metadata()
- remove_contrail_api()
- config["zookeeper-ready"] = False
- zookeeper_relation()
-
-@restart_on_change({"/etc/contrail/contrail-api.conf": ["supervisor-config"],
- "/etc/contrail/contrail-device-manager.conf": ["supervisor-config"],
- "/etc/contrail/contrail-discovery.conf": ["supervisor-config"],
- "/etc/contrail/contrail-schema.conf": ["supervisor-config"],
- "/etc/contrail/contrail-svc-monitor.conf": ["supervisor-config"],
- "/etc/contrail/discovery.conf": ["supervisor-config"]})
-def zookeeper_relation():
- write_contrail_api_config()
- write_contrail_schema_config()
- write_discovery_config()
- write_contrail_svc_monitor_config()
- write_device_manager_config()
-
-if __name__ == "__main__":
- main()
diff --git a/charms/trusty/contrail-configuration/hooks/contrail_configuration_utils.py b/charms/trusty/contrail-configuration/hooks/contrail_configuration_utils.py
deleted file mode 100644
index bb11693..0000000
--- a/charms/trusty/contrail-configuration/hooks/contrail_configuration_utils.py
+++ /dev/null
@@ -1,503 +0,0 @@
-from collections import OrderedDict
-import functools
-import os
-import pwd
-import shutil
-from socket import gethostbyname, gethostname
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output
-)
-from time import sleep, time
-
-import apt_pkg
-from apt_pkg import version_compare
-import json
-import urllib2
-
-from charmhelpers.core.hookenv import (
- config,
- leader_get,
- log,
- related_units,
- relation_get,
- relation_ids,
- relation_type,
- remote_unit,
- unit_get
-)
-
-from charmhelpers.core.host import (
- service_available,
- service_restart,
- service_stop
-)
-
-from charmhelpers.core.templating import render
-
-apt_pkg.init()
-
-def dpkg_version(pkg):
- try:
- return check_output(["dpkg-query", "-f", "${Version}\\n", "-W", pkg]).rstrip()
- except CalledProcessError:
- return None
-
-CONTRAIL_VERSION = dpkg_version("contrail-config")
-
-config = config()
-
-def retry(f=None, timeout=10, delay=2):
- """Retry decorator.
-
- Provides a decorator that can be used to retry a function if it raises
- an exception.
-
- :param timeout: timeout in seconds (default 10)
- :param delay: retry delay in seconds (default 2)
-
- Examples::
-
- # retry fetch_url function
- @retry
- def fetch_url():
- # fetch url
-
- # retry fetch_url function for 60 secs
- @retry(timeout=60)
- def fetch_url():
- # fetch url
- """
- if not f:
- return functools.partial(retry, timeout=timeout, delay=delay)
- @functools.wraps(f)
- def func(*args, **kwargs):
- start = time()
- error = None
- while True:
- try:
- return f(*args, **kwargs)
- except Exception as e:
- error = e
- elapsed = time() - start
- if elapsed >= timeout:
- raise error
- remaining = timeout - elapsed
- if delay <= remaining:
- sleep(delay)
- else:
- sleep(remaining)
- raise error
- return func
-
-def amqp_ctx():
- ctxs = []
- servers = OrderedDict()
- for rid in relation_ids("amqp"):
- for unit in related_units(rid):
- password = relation_get("password", unit, rid)
- if not password:
- continue
- ctxs.append({"rabbit_user": "contrail",
- "rabbit_password": password,
- "rabbit_vhost": "contrail"})
- vip = relation_get("vip", unit, rid)
- server = (vip if vip \
- else gethostbyname(relation_get("hostname", unit, rid))) + ":5672"
- servers[server] = None
- ctx = ctxs[0] if ctxs else {}
- ctx["rabbit_servers"] = servers.keys()
- return ctx
-
-def analytics_api_ctx():
- ctxs = [ { "analytics_server_ip": vip if vip \
- else gethostbyname(relation_get("private-address", unit, rid)),
- "analytics_server_port": port }
- for rid in relation_ids("contrail-analytics-api")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def api_port():
- return 8082
-
-def cassandra_ctx():
- servers = [ gethostbyname(relation_get("private-address", unit, rid))
- + ":" + (rpc_port if rpc_port else port)
- for rid in relation_ids("cassandra")
- for unit, rpc_port, port in
- ((unit, relation_get("rpc_port", unit, rid), relation_get("port", unit, rid))
- for unit in related_units(rid))
- if rpc_port or port ] \
- if config.get("cassandra-ready") else []
- return { "cassandra_servers": servers }
-
-def cassandra_units():
- """Return a list of cassandra units"""
- return [ unit for rid in relation_ids("cassandra")
- for unit in related_units(rid)
- if relation_get("rpc_port", unit, rid) or relation_get("port", unit, rid) ]
-
-@retry(timeout=300, delay=10)
-def check_url(url):
- try:
- urllib2.urlopen(url)
- except urllib2.HTTPError:
- pass
-
-def contrail_ctx():
- addr = gethostbyname(unit_get("private-address"))
- return { "api_port": api_port(),
- "ifmap_server": addr,
- "disc_server": addr,
- "disc_port": discovery_port() }
-
-def contrail_floating_ip_create(network, name):
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Creating floating ip pool {} for network {}".format(name, network))
- check_call(["python", "/usr/share/contrail-utils/create_floating_pool.py",
- "--public_vn_name", network,
- "--floating_ip_pool_name", name,
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant", tenant])
-
-def contrail_floating_ip_deactivate(project, name):
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Deactivating floating ip pool {} for project {}".format(name, project))
- check_call(["scripts/deactivate_floating_pool.py",
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant", tenant,
- project, name])
-
-def contrail_floating_ip_delete(network, name):
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Deleting floating ip pool {} for network {}".format(name, network))
- check_call(["scripts/delete_floating_pool.py",
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant", tenant,
- network, name])
-
-def contrail_floating_ip_use(project, name):
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Activating floating ip pool {} for project {}".format(name, project))
- check_call(["python", "/usr/share/contrail-utils/use_floating_pool.py",
- "--project_name", project,
- "--floating_ip_pool_name", name,
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant", tenant])
-
-def contrail_ifmap_ctx():
- creds = []
- cs = leader_get("ifmap-creds")
- if cs:
- cs = json.loads(cs)
- for units in cs.itervalues():
- for c in units.itervalues():
- creds.append(c)
- return { "ifmap_creds": creds }
-
-def discovery_port():
- return 5998
-
-def fix_ifmap_server():
- # disable ifmap-server upstart service
- if service_available("ifmap-server"):
- service_stop("ifmap-server")
- with open("/etc/init/ifmap-server.override", "w") as conf:
- conf.write("manual\n")
-
- # use supervisord config
- shutil.copy("files/ifmap.ini", "/etc/contrail/supervisord_config_files")
- pw = pwd.getpwnam("contrail")
- os.chown("/etc/contrail/supervisord_config_files/ifmap.ini", pw.pw_uid,
- pw.pw_gid)
- shutil.copy("files/ifmap", "/etc/init.d")
- os.chmod("/etc/init.d/ifmap", 0755)
-
-def fix_nodemgr():
- # add files missing from contrail-nodemgr package
- shutil.copy("files/contrail-nodemgr-config.ini",
- "/etc/contrail/supervisord_config_files")
- pw = pwd.getpwnam("contrail")
- os.chown("/etc/contrail/supervisord_config_files/contrail-nodemgr-config.ini",
- pw.pw_uid, pw.pw_gid)
- shutil.copy("files/contrail-config-nodemgr", "/etc/init.d")
- os.chmod("/etc/init.d/contrail-config-nodemgr", 0755)
-
- # fake ntp status when inside a container
- if is_container():
- shutil.copy("files/ntpq-nodemgr", "/usr/local/bin/ntpq")
-
-def fix_permissions():
- os.chmod("/etc/contrail", 0755)
- os.chown("/etc/contrail", 0, 0)
-
-def fix_scripts():
- version = dpkg_version("contrail-config")
- if version_compare(version, "2.01") >= 0:
- # supervisord and init scripts need correcting on contrail 2.01+
- for service in [ "contrail-api", "contrail-discovery" ]:
- # remove hardcoded port
- check_call(["sed", "-E", "-i", "-e",
- "s/ --listen_port [^[:blank:]]+//",
- "/etc/contrail/supervisord_config_files/{}.ini".format(service)])
-
- # fix init script
- check_call(["sed", "-i", "-e",
- "s/`basename ${0}`$/\"`basename ${0}`:*\"/",
- "/etc/init.d/{}".format(service)])
-
-def fix_services():
- fix_permissions()
- fix_ifmap_server()
- fix_nodemgr()
- fix_scripts()
- service_restart("supervisor-config")
-
-def identity_admin_ctx():
- ctxs = [ { "auth_host": gethostbyname(hostname),
- "auth_port": relation_get("service_port", unit, rid),
- "admin_user": relation_get("service_username", unit, rid),
- "admin_password": relation_get("service_password", unit, rid),
- "admin_tenant_name": relation_get("service_tenant_name", unit, rid),
- "auth_region": relation_get("service_region", unit, rid) }
- for rid in relation_ids("identity-admin")
- for unit, hostname in
- ((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
- if hostname ]
- return ctxs[0] if ctxs else {}
-
-def is_container():
- """Return boolean determining if inside container"""
- try:
- check_call(["running-in-container"])
- return True
- except CalledProcessError:
- return False
-
-def provision_configuration():
- hostname = gethostname()
- ip = gethostbyname(unit_get("private-address"))
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid)
- if relation_get("service_hostname", unit, rid) ][0]
- log("Provisioning configuration {}".format(ip))
- check_call(["contrail-provision-config",
- "--host_name", hostname,
- "--host_ip", ip,
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--oper", "add",
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant_name", tenant])
-
-def provision_metadata():
- ip = [ gethostbyname(relation_get("private-address", unit, rid))
- for rid in relation_ids("neutron-metadata")
- for unit in related_units(rid) ][0]
- user, password = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid)
- if relation_get("service_hostname", unit, rid) ][0]
- log("Provisioning metadata service {}:8775".format(ip))
- check_call(["contrail-provision-linklocal",
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--linklocal_service_name", "metadata",
- "--linklocal_service_ip", "169.254.169.254",
- "--linklocal_service_port", "80",
- "--ipfabric_service_ip", ip,
- "--ipfabric_service_port", "8775",
- "--oper", "add",
- "--admin_user", user,
- "--admin_password", password])
-
-def units(relation):
- """Return a list of units for the specified relation"""
- return [ unit for rid in relation_ids(relation)
- for unit in related_units(rid) ]
-
-def unprovision_configuration():
- if not remote_unit():
- return
- hostname = gethostname()
- ip = gethostbyname(unit_get("private-address"))
- relation = relation_type()
- user = None
- password = None
- tenant = None
- if relation == "identity-admin":
- user = relation_get("service_username")
- password = relation_get("service_password")
- tenant = relation_get("service_tenant_name")
- else:
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Unprovisioning configuration {}".format(ip))
- check_call(["contrail-provision-config",
- "--host_name", hostname,
- "--host_ip", ip,
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--oper", "del",
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant_name", tenant])
-
-def unprovision_metadata():
- if not remote_unit():
- return
- relation = relation_type()
- ip = None
- if relation == "neutron-metadata":
- ip = gethostbyname(relation_get("private-address"))
- else:
- ip = [ gethostbyname(relation_get("private-address", unit, rid))
- for rid in relation_ids("neutron-metadata")
- for unit in related_units(rid) ][0]
- user = None
- password = None
- if relation == "identity-admin":
- user = relation_get("service_username")
- password = relation_get("service_password")
- else:
- user, password = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Unprovisioning metadata service {}:8775".format(ip))
- check_call(["contrail-provision-linklocal",
- "--api_server_ip", "127.0.0.1",
- "--api_server_port", str(api_port()),
- "--linklocal_service_name", "metadata",
- "--linklocal_service_ip", "169.254.169.254",
- "--linklocal_service_port", "80",
- "--ipfabric_service_ip", ip,
- "--ipfabric_service_port", "8775",
- "--oper", "del",
- "--admin_user", user,
- "--admin_password", password])
-
-def write_barbican_auth_config():
- ctx = identity_admin_ctx()
- render("contrail-barbican-auth.conf",
- "/etc/contrail/contrail-barbican-auth.conf", ctx, "root", "contrail",
- 0440)
-
-def write_contrail_api_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(cassandra_ctx())
- ctx.update(zookeeper_ctx())
- ctx.update(amqp_ctx())
- ctx.update(identity_admin_ctx())
- render("contrail-api.conf", "/etc/contrail/contrail-api.conf", ctx, "root",
- "contrail", 0440)
-
-def write_contrail_schema_config():
- ctx = {}
- ctx.update(cassandra_ctx())
- ctx.update(zookeeper_ctx())
- ctx.update(identity_admin_ctx())
- ctx.update(contrail_ctx())
- if version_compare(CONTRAIL_VERSION, "3.0") >= 0:
- ctx["rabbitmq"] = True
- ctx.update(amqp_ctx())
- render("contrail-schema.conf", "/etc/contrail/contrail-schema.conf",
- ctx, "root", "contrail", 0440)
-
-def write_contrail_svc_monitor_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(cassandra_ctx())
- ctx.update(zookeeper_ctx())
- ctx.update(amqp_ctx())
- ctx.update(identity_admin_ctx())
- ctx.update(analytics_api_ctx())
- render("contrail-svc-monitor.conf",
- "/etc/contrail/contrail-svc-monitor.conf", ctx, "root", "contrail",
- 0440)
-
-def write_device_manager_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(zookeeper_ctx())
- ctx.update(cassandra_ctx())
- ctx.update(amqp_ctx())
- ctx.update(identity_admin_ctx())
- render("contrail-device-manager.conf",
- "/etc/contrail/contrail-device-manager.conf", ctx, "root",
- "contrail", 0440)
-
-def write_discovery_config():
- ctx = {}
- ctx.update(zookeeper_ctx())
- ctx.update(cassandra_ctx())
- target = "/etc/contrail/contrail-discovery.conf" \
- if version_compare(CONTRAIL_VERSION, "1.20~") >= 0 \
- else "/etc/contrail/discovery.conf"
- render("discovery.conf", target, ctx)
-
-def write_ifmap_config():
- ctx = contrail_ifmap_ctx()
- render("basicauthusers.properties",
- "/etc/ifmap-server/basicauthusers.properties", ctx, "root",
- "contrail", 0440)
-
-def write_nodemgr_config():
- ctx = contrail_ctx()
- render("contrail-config-nodemgr.conf",
- "/etc/contrail/contrail-config-nodemgr.conf", ctx)
-
-def write_vnc_api_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(identity_admin_ctx())
- render("vnc_api_lib.ini", "/etc/contrail/vnc_api_lib.ini", ctx)
-
-def zookeeper_ctx():
- return { "zk_servers": [ gethostbyname(relation_get("private-address", unit, rid))
- + ":" + port
- for rid in relation_ids("zookeeper")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ] }
diff --git a/charms/trusty/contrail-configuration/hooks/http-services-relation-joined b/charms/trusty/contrail-configuration/hooks/http-services-relation-joined
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/http-services-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/identity-admin-relation-broken b/charms/trusty/contrail-configuration/hooks/identity-admin-relation-broken
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/identity-admin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/identity-admin-relation-changed b/charms/trusty/contrail-configuration/hooks/identity-admin-relation-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/identity-admin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/identity-admin-relation-departed b/charms/trusty/contrail-configuration/hooks/identity-admin-relation-departed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/identity-admin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/install b/charms/trusty/contrail-configuration/hooks/install
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/leader-settings-changed b/charms/trusty/contrail-configuration/hooks/leader-settings-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/leader-settings-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-broken b/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-broken
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-changed b/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-departed b/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-departed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/neutron-metadata-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/start b/charms/trusty/contrail-configuration/hooks/start
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/stop b/charms/trusty/contrail-configuration/hooks/stop
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/upgrade-charm b/charms/trusty/contrail-configuration/hooks/upgrade-charm
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/zookeeper-relation-broken b/charms/trusty/contrail-configuration/hooks/zookeeper-relation-broken
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/zookeeper-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/zookeeper-relation-changed b/charms/trusty/contrail-configuration/hooks/zookeeper-relation-changed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/zookeeper-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/hooks/zookeeper-relation-departed b/charms/trusty/contrail-configuration/hooks/zookeeper-relation-departed
deleted file mode 120000
index db4c3cd..0000000
--- a/charms/trusty/contrail-configuration/hooks/zookeeper-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_configuration_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-configuration/icon.svg b/charms/trusty/contrail-configuration/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/contrail-configuration/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/contrail-configuration/metadata.yaml b/charms/trusty/contrail-configuration/metadata.yaml
deleted file mode 100644
index c9d0be4..0000000
--- a/charms/trusty/contrail-configuration/metadata.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-name: contrail-configuration
-summary: OpenContrail Configuration Node
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- OpenContrail is a network virtualization solution that provides an overlay
- virtual-network to virtual-machines, containers or network namespaces.
- .
- This charm provides the configuration node component.
-categories:
- - openstack
-peers:
- cluster:
- interface: contrail-cluster
-provides:
- contrail-api:
- interface: contrail-api
- contrail-discovery:
- interface: contrail-discovery
- contrail-ifmap:
- interface: contrail-ifmap
- http-services:
- interface: http
-requires:
- amqp:
- interface: rabbitmq
- cassandra:
- interface: cassandra
- contrail-analytics-api:
- interface: contrail-analytics-api
- identity-admin:
- interface: keystone-admin
- neutron-metadata:
- interface: neutron-metadata
- zookeeper:
- interface: zookeeper
diff --git a/charms/trusty/contrail-configuration/scripts/deactivate_floating_pool.py b/charms/trusty/contrail-configuration/scripts/deactivate_floating_pool.py
deleted file mode 100755
index b189023..0000000
--- a/charms/trusty/contrail-configuration/scripts/deactivate_floating_pool.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-
-"""Deactivate Floating IP pool for project.
-
-This is a workaround whilst OpenContrail doesn't provide the needed
-functionality in *_floating_pool.py scripts.
-"""
-
-import argparse
-
-from vnc_api import vnc_api
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("project_name", help="Colon separated fully qualified name")
-
-parser.add_argument("floating_ip_pool_name",
- help="Name of the floating IP pool")
-
-parser.add_argument("--api_server_ip", help="IP address of api server",
- default="127.0.0.1")
-
-parser.add_argument("--api_server_port", help="Port of api server",
- default="8082")
-
-parser.add_argument("--admin_user", help="Name of keystone admin user")
-
-parser.add_argument("--admin_password", help="Password of keystone admin user")
-
-parser.add_argument("--admin_tenant_name",
- help="Tenant name for keystone admin user")
-
-args = parser.parse_args()
-
-vnc_lib = vnc_api.VncApi(api_server_host=args.api_server_ip,
- api_server_port=args.api_server_port,
- username=args.admin_user,
- password=args.admin_password,
- tenant_name=args.admin_tenant_name)
-
-project = vnc_lib.project_read(fq_name=args.project_name.split(":"))
-pool = vnc_lib.floating_ip_pool_read(fq_name=args.floating_ip_pool_name.split(":"))
-project.del_floating_ip_pool(pool)
-vnc_lib.project_update(project)
diff --git a/charms/trusty/contrail-configuration/scripts/delete_floating_pool.py b/charms/trusty/contrail-configuration/scripts/delete_floating_pool.py
deleted file mode 100755
index eae20be..0000000
--- a/charms/trusty/contrail-configuration/scripts/delete_floating_pool.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-
-"""Delete Floating IP pool.
-
-This is a workaround whilst OpenContrail doesn't provide the needed
-functionality in *_floating_pool.py scripts.
-"""
-
-import argparse
-
-from vnc_api import vnc_api
-
-parser = argparse.ArgumentParser()
-
-parser.add_argument("public_vn_name",
- help="Colon separated fully qualified name")
-
-parser.add_argument("floating_ip_pool_name",
- help="Name of the floating IP pool")
-
-parser.add_argument("--api_server_ip", help="IP address of api server",
- default="127.0.0.1")
-
-parser.add_argument("--api_server_port", help="Port of api server",
- default="8082")
-
-parser.add_argument("--admin_user", help="Name of keystone admin user")
-
-parser.add_argument("--admin_password", help="Password of keystone admin user")
-
-parser.add_argument("--admin_tenant_name",
- help="Tenant name for keystone admin user")
-
-args = parser.parse_args()
-
-vnc_lib = vnc_api.VncApi(api_server_host=args.api_server_ip,
- api_server_port=args.api_server_port,
- username=args.admin_user,
- password=args.admin_password,
- tenant_name=args.admin_tenant_name)
-
-name = args.public_vn_name.split(":")
-name.append(args.floating_ip_pool_name)
-vnc_lib.floating_ip_pool_delete(fq_name=name)
diff --git a/charms/trusty/contrail-configuration/templates/basicauthusers.properties b/charms/trusty/contrail-configuration/templates/basicauthusers.properties
deleted file mode 100644
index 3a39e5c..0000000
--- a/charms/trusty/contrail-configuration/templates/basicauthusers.properties
+++ /dev/null
@@ -1,25 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-test:test
-dhcp:dhcp
-visual:visual
-sensor:sensor
-
-# compliance testsuite users
-mapclient:mapclient
-helper:mapclient
-
-# This is a read-only MAPC
-reader:reader
-
-# OpenContrail users
-api-server:api-server
-schema-transformer:schema-transformer
-svc-monitor:svc-monitor
-
-{% for cred in ifmap_creds -%}
-{{ cred["username"] }}:{{ cred["password"] }}
-{% endfor -%}
diff --git a/charms/trusty/contrail-configuration/templates/contrail-api.conf b/charms/trusty/contrail-configuration/templates/contrail-api.conf
deleted file mode 100644
index 2040c19..0000000
--- a/charms/trusty/contrail-configuration/templates/contrail-api.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-log_file = /var/log/contrail/contrail-api.log
-ifmap_server_ip = {{ ifmap_server }}
-ifmap_username = api-server
-ifmap_password = api-server
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-auth = keystone
-multi_tenancy = True
-disc_server_ip = {{ disc_server }}
-disc_server_port = {{ disc_port }}
-zk_server_ip = {{ zk_servers|join(",") }}
-rabbit_server = {{ rabbit_servers|join(",") }}
-rabbit_user = {{ rabbit_user }}
-rabbit_password = {{ rabbit_password }}
-rabbit_vhost = {{ rabbit_vhost }}
-
-[KEYSTONE]
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = http
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_token =
-admin_tenant_name = {{ admin_tenant_name }}
-
diff --git a/charms/trusty/contrail-configuration/templates/contrail-barbican-auth.conf b/charms/trusty/contrail-configuration/templates/contrail-barbican-auth.conf
deleted file mode 100644
index 84ab4a0..0000000
--- a/charms/trusty/contrail-configuration/templates/contrail-barbican-auth.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULT]
-auth_url = http://{{ auth_host }}:{{ auth_port }}/v2.0
-auth_version = 2
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-region = {{ auth_region }}
-
diff --git a/charms/trusty/contrail-configuration/templates/contrail-config-nodemgr.conf b/charms/trusty/contrail-configuration/templates/contrail-config-nodemgr.conf
deleted file mode 100644
index a49d020..0000000
--- a/charms/trusty/contrail-configuration/templates/contrail-config-nodemgr.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DISCOVERY]
-server = {{ disc_server }}
-port = {{ disc_port }}
-
diff --git a/charms/trusty/contrail-configuration/templates/contrail-device-manager.conf b/charms/trusty/contrail-configuration/templates/contrail-device-manager.conf
deleted file mode 100644
index 7dca344..0000000
--- a/charms/trusty/contrail-configuration/templates/contrail-device-manager.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-api_server_ip = 127.0.0.1
-api_server_port = {{ api_port }}
-zk_server_ip = {{ zk_servers|join(",") }}
-log_file = /var/log/contrail/contrail-device-manager.log
-log_local = 1
-log_level = SYS_NOTICE
-disc_server_ip = 127.0.0.1
-disc_server_port = {{ disc_port }}
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-rabbit_server = {{ rabbit_servers|join(",") }}
-rabbit_user = {{ rabbit_user }}
-rabbit_password = {{ rabbit_password }}
-rabbit_vhost = {{ rabbit_vhost }}
-
-[KEYSTONE]
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-
diff --git a/charms/trusty/contrail-configuration/templates/contrail-schema.conf b/charms/trusty/contrail-configuration/templates/contrail-schema.conf
deleted file mode 100644
index 7e0d3ed..0000000
--- a/charms/trusty/contrail-configuration/templates/contrail-schema.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-log_file = /var/log/contrail/contrail-schema.log
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-zk_server_ip = {{ zk_servers|join(",") }}
-disc_server_ip = 127.0.0.1
-disc_server_port = {{ disc_port }}
-api_server_port = {{ api_port }}
-{%- if rabbitmq %}
-rabbit_server = {{ rabbit_servers|join(",") }}
-rabbit_user = {{ rabbit_user }}
-rabbit_password = {{ rabbit_password }}
-rabbit_vhost = {{ rabbit_vhost }}
-{%- endif %}
-
-[KEYSTONE]
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-
diff --git a/charms/trusty/contrail-configuration/templates/contrail-svc-monitor.conf b/charms/trusty/contrail-configuration/templates/contrail-svc-monitor.conf
deleted file mode 100644
index bf912b1..0000000
--- a/charms/trusty/contrail-configuration/templates/contrail-svc-monitor.conf
+++ /dev/null
@@ -1,35 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-ifmap_server_ip = 127.0.0.1
-ifmap_server_port = 8443
-ifmap_username = svc-monitor
-ifmap_password = svc-monitor
-api_server_ip = 127.0.0.1
-api_server_port = {{ api_port }}
-zk_server_ip = {{ zk_servers|join(",") }}
-log_file = /var/log/contrail/contrail-svc-monitor.log
-log_local = 1
-log_level = SYS_NOTICE
-disc_server_ip = 127.0.0.1
-disc_server_port = {{ disc_port }}
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-rabbit_server = {{ rabbit_servers|join(",") }}
-rabbit_user = {{ rabbit_user }}
-rabbit_password = {{ rabbit_password }}
-rabbit_vhost = {{ rabbit_vhost }}
-
-[KEYSTONE]
-auth_host = {{ auth_host }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_token =
-
-[SCHEDULER]
-analytics_server_ip = {{ analytics_server_ip }}
-analytics_server_port = {{ analytics_server_port }}
-
diff --git a/charms/trusty/contrail-configuration/templates/discovery.conf b/charms/trusty/contrail-configuration/templates/discovery.conf
deleted file mode 100644
index bcb500d..0000000
--- a/charms/trusty/contrail-configuration/templates/discovery.conf
+++ /dev/null
@@ -1,18 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULTS]
-zk_server_ip = {{ zk_servers|join(",") }}
-listen_ip_addr = 0.0.0.0
-listen_port = 5998
-log_local = True
-log_file = /var/log/contrail/discovery.log
-cassandra_server_list = {{ cassandra_servers|join(" ") }}
-ttl_min = 300
-ttl_max = 1800
-hc_interval = 5
-hc_max_miss = 3
-ttl_short = 1
-
diff --git a/charms/trusty/contrail-configuration/templates/vnc_api_lib.ini b/charms/trusty/contrail-configuration/templates/vnc_api_lib.ini
deleted file mode 100644
index 4cfbac9..0000000
--- a/charms/trusty/contrail-configuration/templates/vnc_api_lib.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[global]
-WEB_SERVER = 127.0.0.1
-WEB_PORT = {{ api_port }}
-
-[auth]
-AUTHN_TYPE = keystone
-AUTHN_PROTOCOL = http
-AUTHN_SERVER = {{ auth_host }}
-AUTHN_PORT = {{ auth_port }}
-AUTHN_URL = /v2.0/tokens
-
diff --git a/charms/trusty/contrail-control/.bzrignore b/charms/trusty/contrail-control/.bzrignore
deleted file mode 100644
index ba077a4..0000000
--- a/charms/trusty/contrail-control/.bzrignore
+++ /dev/null
@@ -1 +0,0 @@
-bin
diff --git a/charms/trusty/contrail-control/Makefile b/charms/trusty/contrail-control/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/contrail-control/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/contrail-control/README.md b/charms/trusty/contrail-control/README.md
deleted file mode 100644
index 702fc29..0000000
--- a/charms/trusty/contrail-control/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This charm provides the control node component which contains the
-contrail-control service.
-Only OpenStack Icehouse or newer is supported.
-
-Usage
------
-
-Contrail Configuration and Keystone are prerequisite services to deploy.
-Once ready, deploy and relate as follows:
-
- juju deploy contrail-control
- juju add-relation contrail-control:contrail-api contrail-configuration:contrail-api
- juju add-relation contrail-control:contrail-discovery contrail-configuration:contrail-discovery
- juju add-relation contrail-control:contrail-ifmap contrail-configuration:contrail-ifmap
- juju add-relation contrail-control keystone
-
-Install Sources
----------------
-
-The version of OpenContrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
-
-The version of dependent OpenStack components installed when deploying can be
-changed using the 'openstack-origin' option. When deploying to different
-OpenStack versions, openstack-origin needs to be set across all OpenStack and
-OpenContrail charms where available.
-
-High Availability (HA)
-----------------------
-
-Multiple units of this charm can be deployed to support HA deployments:
-
- juju add-unit contrail-control
diff --git a/charms/trusty/contrail-control/charm-helpers-sync.yaml b/charms/trusty/contrail-control/charm-helpers-sync.yaml
deleted file mode 100644
index eadff82..0000000
--- a/charms/trusty/contrail-control/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
- - contrib.network
- - contrib.openstack|inc=*
- - contrib.python
- - contrib.storage
diff --git a/charms/trusty/contrail-control/config.yaml b/charms/trusty/contrail-control/config.yaml
deleted file mode 100644
index be706a1..0000000
--- a/charms/trusty/contrail-control/config.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-options:
- install-sources:
- type: string
- default: |
- - "ppa:opencontrail/ppa"
- - "ppa:opencontrail/r2.20"
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
- openstack-origin:
- type: string
- default: distro
- description: |
- Repository from which to install. May be one of the following:
- distro (default), ppa:somecustom/ppa, a deb url sources entry,
- or a supported Cloud Archive release pocket.
- .
- Supported Cloud Archive sources include: cloud:precise-folsom,
- cloud:precise-folsom/updates, cloud:precise-folsom/staging,
- cloud:precise-folsom/proposed.
diff --git a/charms/trusty/contrail-control/copyright b/charms/trusty/contrail-control/copyright
deleted file mode 100644
index 4081144..0000000
--- a/charms/trusty/contrail-control/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2014, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/files/contrail-control-nodemgr b/charms/trusty/contrail-control/files/contrail-control-nodemgr
deleted file mode 100644
index 31da6b8..0000000
--- a/charms/trusty/contrail-control/files/contrail-control-nodemgr
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network Control Node Manager
-
-supervisorctl -s unix:///tmp/supervisord_control.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-control/files/contrail-nodemgr-control.ini b/charms/trusty/contrail-control/files/contrail-nodemgr-control.ini
deleted file mode 100644
index 807aeaf..0000000
--- a/charms/trusty/contrail-control/files/contrail-nodemgr-control.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[eventlistener:contrail-control-nodemgr]
-command=/bin/bash -c "exec python /usr/bin/contrail-nodemgr --nodetype=contrail-control"
-events=PROCESS_COMMUNICATION,PROCESS_STATE,TICK_60
-buffer_size=10000
-stdout_logfile=/var/log/contrail/contrail-control-nodemgr-stdout.log
-stderr_logfile=/var/log/contrail/contrail-control-nodemgr-stderr.log
diff --git a/charms/trusty/contrail-control/files/ntpq-nodemgr b/charms/trusty/contrail-control/files/ntpq-nodemgr
deleted file mode 100755
index da00247..0000000
--- a/charms/trusty/contrail-control/files/ntpq-nodemgr
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-#
-# Script to produce some dummy output to satisfy contrail-nodemgr ntp status
-# Note: This is intended to be deployed inside containers where the host is running ntp
-
-if [ -x /usr/bin/ntpq ]; then
- exec /usr/bin/ntpq "$@"
-fi
-
-echo "*"
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ip.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ip.py
deleted file mode 100644
index 7f3b66b..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ip.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import glob
-import re
-import subprocess
-import six
-import socket
-
-from functools import partial
-
-from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- WARNING,
-)
-
-try:
- import netifaces
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netifaces', fatal=True)
- import netifaces
-
-try:
- import netaddr
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-netaddr', fatal=True)
- import netaddr
-
-
-def _validate_cidr(network):
- try:
- netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
-
-def no_ip_found_error_out(network):
- errmsg = ("No IP address found in network: %s" % network)
- raise ValueError(errmsg)
-
-
-def get_address_in_network(network, fallback=None, fatal=False):
- """Get an IPv4 or IPv6 address within the network from the host.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param fallback (str): If no address is found, return fallback.
- :param fatal (boolean): If no address is found, fallback is not
- set and fatal is True then exit(1).
- """
- if network is None:
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
- else:
- return None
-
- _validate_cidr(network)
- network = netaddr.IPNetwork(network)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if network.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- if cidr in network:
- return str(cidr.ip)
-
- if network.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- if cidr in network:
- return str(cidr.ip)
-
- if fallback is not None:
- return fallback
-
- if fatal:
- no_ip_found_error_out(network)
-
- return None
-
-
-def is_ipv6(address):
- """Determine whether provided address is IPv6 or not."""
- try:
- address = netaddr.IPAddress(address)
- except netaddr.AddrFormatError:
- # probably a hostname - so not an address at all!
- return False
-
- return address.version == 6
-
-
-def is_address_in_network(network, address):
- """
- Determine whether the provided address is within a network range.
-
- :param network (str): CIDR presentation format. For example,
- '192.168.1.0/24'.
- :param address: An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :returns boolean: Flag indicating whether address is in network.
- """
- try:
- network = netaddr.IPNetwork(network)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Network (%s) is not in CIDR presentation format" %
- network)
-
- try:
- address = netaddr.IPAddress(address)
- except (netaddr.core.AddrFormatError, ValueError):
- raise ValueError("Address (%s) is not in correct presentation format" %
- address)
-
- if address in network:
- return True
- else:
- return False
-
-
-def _get_for_address(address, key):
- """Retrieve an attribute of or the physical interface that
- the IP address provided could be bound to.
-
- :param address (str): An individual IPv4 or IPv6 address without a net
- mask or subnet prefix. For example, '192.168.1.1'.
- :param key: 'iface' for the physical interface name or an attribute
- of the configured interface, for example 'netmask'.
- :returns str: Requested attribute or None if address is not bindable.
- """
- address = netaddr.IPAddress(address)
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- if address.version == 4 and netifaces.AF_INET in addresses:
- addr = addresses[netifaces.AF_INET][0]['addr']
- netmask = addresses[netifaces.AF_INET][0]['netmask']
- network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- else:
- return addresses[netifaces.AF_INET][0][key]
-
- if address.version == 6 and netifaces.AF_INET6 in addresses:
- for addr in addresses[netifaces.AF_INET6]:
- if not addr['addr'].startswith('fe80'):
- network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
- addr['netmask']))
- cidr = network.cidr
- if address in cidr:
- if key == 'iface':
- return iface
- elif key == 'netmask' and cidr:
- return str(cidr).split('/')[1]
- else:
- return addr[key]
-
- return None
-
-
-get_iface_for_address = partial(_get_for_address, key='iface')
-
-
-get_netmask_for_address = partial(_get_for_address, key='netmask')
-
-
-def format_ipv6_addr(address):
- """If address is IPv6, wrap it in '[]' otherwise return None.
-
- This is required by most configuration files when specifying IPv6
- addresses.
- """
- if is_ipv6(address):
- return "[%s]" % address
-
- return None
-
-
-def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
- fatal=True, exc_list=None):
- """Return the assigned IP address for a given interface, if any."""
- # Extract nic if passed /dev/ethX
- if '/' in iface:
- iface = iface.split('/')[-1]
-
- if not exc_list:
- exc_list = []
-
- try:
- inet_num = getattr(netifaces, inet_type)
- except AttributeError:
- raise Exception("Unknown inet type '%s'" % str(inet_type))
-
- interfaces = netifaces.interfaces()
- if inc_aliases:
- ifaces = []
- for _iface in interfaces:
- if iface == _iface or _iface.split(':')[0] == iface:
- ifaces.append(_iface)
-
- if fatal and not ifaces:
- raise Exception("Invalid interface '%s'" % iface)
-
- ifaces.sort()
- else:
- if iface not in interfaces:
- if fatal:
- raise Exception("Interface '%s' not found " % (iface))
- else:
- return []
-
- else:
- ifaces = [iface]
-
- addresses = []
- for netiface in ifaces:
- net_info = netifaces.ifaddresses(netiface)
- if inet_num in net_info:
- for entry in net_info[inet_num]:
- if 'addr' in entry and entry['addr'] not in exc_list:
- addresses.append(entry['addr'])
-
- if fatal and not addresses:
- raise Exception("Interface '%s' doesn't have any %s addresses." %
- (iface, inet_type))
-
- return sorted(addresses)
-
-
-get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
-
-
-def get_iface_from_addr(addr):
- """Work out on which interface the provided address is configured."""
- for iface in netifaces.interfaces():
- addresses = netifaces.ifaddresses(iface)
- for inet_type in addresses:
- for _addr in addresses[inet_type]:
- _addr = _addr['addr']
- # link local
- ll_key = re.compile("(.+)%.*")
- raw = re.match(ll_key, _addr)
- if raw:
- _addr = raw.group(1)
-
- if _addr == addr:
- log("Address '%s' is configured on iface '%s'" %
- (addr, iface))
- return iface
-
- msg = "Unable to infer net iface on which '%s' is configured" % (addr)
- raise Exception(msg)
-
-
-def sniff_iface(f):
- """Ensure decorated function is called with a value for iface.
-
- If no iface provided, inject net iface inferred from unit private address.
- """
- def iface_sniffer(*args, **kwargs):
- if not kwargs.get('iface', None):
- kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
-
- return f(*args, **kwargs)
-
- return iface_sniffer
-
-
-@sniff_iface
-def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
- dynamic_only=True):
- """Get assigned IPv6 address for a given interface.
-
- Returns list of addresses found. If no address found, returns empty list.
-
- If iface is None, we infer the current primary interface by doing a reverse
- lookup on the unit private-address.
-
- We currently only support scope global IPv6 addresses i.e. non-temporary
- addresses. If no global IPv6 address is found, return the first one found
- in the ipv6 address list.
- """
- addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
- inc_aliases=inc_aliases, fatal=fatal,
- exc_list=exc_list)
-
- if addresses:
- global_addrs = []
- for addr in addresses:
- key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
- m = re.match(key_scope_link_local, addr)
- if m:
- eui_64_mac = m.group(1)
- iface = m.group(2)
- else:
- global_addrs.append(addr)
-
- if global_addrs:
- # Make sure any found global addresses are not temporary
- cmd = ['ip', 'addr', 'show', iface]
- out = subprocess.check_output(cmd).decode('UTF-8')
- if dynamic_only:
- key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
- else:
- key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
-
- addrs = []
- for line in out.split('\n'):
- line = line.strip()
- m = re.match(key, line)
- if m and 'temporary' not in line:
- # Return the first valid address we find
- for addr in global_addrs:
- if m.group(1) == addr:
- if not dynamic_only or \
- m.group(1).endswith(eui_64_mac):
- addrs.append(addr)
-
- if addrs:
- return addrs
-
- if fatal:
- raise Exception("Interface '%s' does not have a scope global "
- "non-temporary ipv6 address." % iface)
-
- return []
-
-
-def get_bridges(vnic_dir='/sys/devices/virtual/net'):
- """Return a list of bridges on the system."""
- b_regex = "%s/*/bridge" % vnic_dir
- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
-
-
-def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
- """Return a list of nics comprising a given bridge on the system."""
- brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
- return [x.split('/')[-1] for x in glob.glob(brif_regex)]
-
-
-def is_bridge_member(nic):
- """Check if a given nic is a member of a bridge."""
- for bridge in get_bridges():
- if nic in get_bridge_nics(bridge):
- return True
-
- return False
-
-
-def is_ip(address):
- """
- Returns True if address is a valid IP address.
- """
- try:
- # Test to see if already an IPv4 address
- socket.inet_aton(address)
- return True
- except socket.error:
- return False
-
-
-def ns_query(address):
- try:
- import dns.resolver
- except ImportError:
- apt_install('python-dnspython')
- import dns.resolver
-
- if isinstance(address, dns.name.Name):
- rtype = 'PTR'
- elif isinstance(address, six.string_types):
- rtype = 'A'
- else:
- return None
-
- answers = dns.resolver.query(address, rtype)
- if answers:
- return str(answers[0])
- return None
-
-
-def get_host_ip(hostname, fallback=None):
- """
- Resolves the IP for a given hostname, or returns
- the input if it is already an IP.
- """
- if is_ip(hostname):
- return hostname
-
- ip_addr = ns_query(hostname)
- if not ip_addr:
- try:
- ip_addr = socket.gethostbyname(hostname)
- except:
- log("Failed to resolve hostname '%s'" % (hostname),
- level=WARNING)
- return fallback
- return ip_addr
-
-
-def get_hostname(address, fqdn=True):
- """
- Resolves hostname for given IP, or returns the input
- if it is already a hostname.
- """
- if is_ip(address):
- try:
- import dns.reversename
- except ImportError:
- apt_install("python-dnspython")
- import dns.reversename
-
- rev = dns.reversename.from_address(address)
- result = ns_query(rev)
-
- if not result:
- try:
- result = socket.gethostbyaddr(address)[0]
- except:
- return None
- else:
- result = address
-
- if fqdn:
- # strip trailing .
- if result.endswith('.'):
- return result[:-1]
- else:
- return result
- else:
- return result.split('.')[0]
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ovs/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ovs/__init__.py
deleted file mode 100644
index 77e2db7..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helpers for interacting with OpenvSwitch '''
-import subprocess
-import os
-from charmhelpers.core.hookenv import (
- log, WARNING
-)
-from charmhelpers.core.host import (
- service
-)
-
-
-def add_bridge(name):
- ''' Add the named bridge to openvswitch '''
- log('Creating bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
-
-
-def del_bridge(name):
- ''' Delete the named bridge from openvswitch '''
- log('Deleting bridge {}'.format(name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
-
-
-def add_bridge_port(name, port, promisc=False):
- ''' Add a port to the named openvswitch bridge '''
- log('Adding port {} to bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "up"])
- if promisc:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
- else:
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def del_bridge_port(name, port):
- ''' Delete a port from the named openvswitch bridge '''
- log('Deleting port {} from bridge {}'.format(port, name))
- subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
- name, port])
- subprocess.check_call(["ip", "link", "set", port, "down"])
- subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
-
-
-def set_manager(manager):
- ''' Set the controller for the local openvswitch '''
- log('Setting manager for local ovs to {}'.format(manager))
- subprocess.check_call(['ovs-vsctl', 'set-manager',
- 'ssl:{}'.format(manager)])
-
-
-CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
-
-
-def get_certificate():
- ''' Read openvswitch certificate from disk '''
- if os.path.exists(CERT_PATH):
- log('Reading ovs certificate from {}'.format(CERT_PATH))
- with open(CERT_PATH, 'r') as cert:
- full_cert = cert.read()
- begin_marker = "-----BEGIN CERTIFICATE-----"
- end_marker = "-----END CERTIFICATE-----"
- begin_index = full_cert.find(begin_marker)
- end_index = full_cert.rfind(end_marker)
- if end_index == -1 or begin_index == -1:
- raise RuntimeError("Certificate does not contain valid begin"
- " and end markers.")
- full_cert = full_cert[begin_index:(end_index + len(end_marker))]
- return full_cert
- else:
- log('Certificate not found', level=WARNING)
- return None
-
-
-def full_restart():
- ''' Full restart and reload of openvswitch '''
- if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
- service('start', 'openvswitch-force-reload-kmod')
- else:
- service('force-reload-kmod', 'openvswitch-switch')
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ufw.py
deleted file mode 100644
index b65d963..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/network/ufw.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-This module contains helpers to add and remove ufw rules.
-
-Examples:
-
-- open SSH port for subnet 10.0.3.0/24:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
-
-- open service by name as defined in /etc/services:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('ssh', 'open')
-
-- close service by port number:
-
- >>> from charmhelpers.contrib.network import ufw
- >>> ufw.enable()
- >>> ufw.service('4949', 'close') # munin
-"""
-import re
-import os
-import subprocess
-
-from charmhelpers.core import hookenv
-from charmhelpers.core.kernel import modprobe, is_module_loaded
-
-__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
-
-
-class UFWError(Exception):
- pass
-
-
-class UFWIPv6Error(UFWError):
- pass
-
-
-def is_enabled():
- """
- Check if `ufw` is enabled
-
- :returns: True if ufw is enabled
- """
- output = subprocess.check_output(['ufw', 'status'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Status: active\n', output, re.M)
-
- return len(m) >= 1
-
-
-def is_ipv6_ok(soft_fail=False):
- """
- Check if IPv6 support is present and ip6tables functional
-
- :param soft_fail: If set to True and IPv6 support is broken, then reports
- that the host doesn't have IPv6 support, otherwise a
- UFWIPv6Error exception is raised.
- :returns: True if IPv6 is working, False otherwise
- """
-
- # do we have IPv6 in the machine?
- if os.path.isdir('/proc/sys/net/ipv6'):
- # is ip6tables kernel module loaded?
- if not is_module_loaded('ip6_tables'):
- # ip6tables support isn't complete, let's try to load it
- try:
- modprobe('ip6_tables')
- # great, we can load the module
- return True
- except subprocess.CalledProcessError as ex:
- hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
- level="WARN")
- # we are in a world where ip6tables isn't working
- if soft_fail:
- # so we inform that the machine doesn't have IPv6
- return False
- else:
- raise UFWIPv6Error("IPv6 firewall support broken")
- else:
- # the module is present :)
- return True
-
- else:
- # the system doesn't have IPv6
- return False
-
-
-def disable_ipv6():
- """
- Disable ufw IPv6 support in /etc/default/ufw
- """
- exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
- '/etc/default/ufw'])
- if exit_code == 0:
- hookenv.log('IPv6 support in ufw disabled', level='INFO')
- else:
- hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
- raise UFWError("Couldn't disable IPv6 support in ufw")
-
-
-def enable(soft_fail=False):
- """
- Enable ufw
-
- :param soft_fail: If set to True silently disables IPv6 support in ufw,
- otherwise a UFWIPv6Error exception is raised when IP6
- support is broken.
- :returns: True if ufw is successfully enabled
- """
- if is_enabled():
- return True
-
- if not is_ipv6_ok(soft_fail):
- disable_ipv6()
-
- output = subprocess.check_output(['ufw', 'enable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall('^Firewall is active and enabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be enabled", level='WARN')
- return False
- else:
- hookenv.log("ufw enabled", level='INFO')
- return True
-
-
-def disable():
- """
- Disable ufw
-
- :returns: True if ufw is successfully disabled
- """
- if not is_enabled():
- return True
-
- output = subprocess.check_output(['ufw', 'disable'],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
-
- m = re.findall(r'^Firewall stopped and disabled on system startup\n',
- output, re.M)
- hookenv.log(output, level='DEBUG')
-
- if len(m) == 0:
- hookenv.log("ufw couldn't be disabled", level='WARN')
- return False
- else:
- hookenv.log("ufw disabled", level='INFO')
- return True
-
-
-def default_policy(policy='deny', direction='incoming'):
- """
- Changes the default policy for traffic `direction`
-
- :param policy: allow, deny or reject
- :param direction: traffic direction, possible values: incoming, outgoing,
- routed
- """
- if policy not in ['allow', 'deny', 'reject']:
- raise UFWError(('Unknown policy %s, valid values: '
- 'allow, deny, reject') % policy)
-
- if direction not in ['incoming', 'outgoing', 'routed']:
- raise UFWError(('Unknown direction %s, valid values: '
- 'incoming, outgoing, routed') % direction)
-
- output = subprocess.check_output(['ufw', 'default', policy, direction],
- universal_newlines=True,
- env={'LANG': 'en_US',
- 'PATH': os.environ['PATH']})
- hookenv.log(output, level='DEBUG')
-
- m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
- policy),
- output, re.M)
- if len(m) == 0:
- hookenv.log("ufw couldn't change the default policy to %s for %s"
- % (policy, direction), level='WARN')
- return False
- else:
- hookenv.log("ufw default policy for %s changed to %s"
- % (direction, policy), level='INFO')
- return True
-
-
-def modify_access(src, dst='any', port=None, proto=None, action='allow',
- index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param action: `allow` or `delete`
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- if not is_enabled():
- hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
- return
-
- if action == 'delete':
- cmd = ['ufw', 'delete', 'allow']
- elif index is not None:
- cmd = ['ufw', 'insert', str(index), action]
- else:
- cmd = ['ufw', action]
-
- if src is not None:
- cmd += ['from', src]
-
- if dst is not None:
- cmd += ['to', dst]
-
- if port is not None:
- cmd += ['port', str(port)]
-
- if proto is not None:
- cmd += ['proto', proto]
-
- hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
-
- hookenv.log(stdout, level='INFO')
-
- if p.returncode != 0:
- hookenv.log(stderr, level='ERROR')
- hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
- p.returncode),
- level='ERROR')
-
-
-def grant_access(src, dst='any', port=None, proto=None, index=None):
- """
- Grant access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- :param index: if different from None the rule is inserted at the given
- `index`.
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
- index=index)
-
-
-def revoke_access(src, dst='any', port=None, proto=None):
- """
- Revoke access to an address or subnet
-
- :param src: address (e.g. 192.168.1.234) or subnet
- (e.g. 192.168.1.0/24).
- :param dst: destiny of the connection, if the machine has multiple IPs and
- connections to only one of those have to accepted this is the
- field has to be set.
- :param port: destiny port
- :param proto: protocol (tcp or udp)
- """
- return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
-
-
-def service(name, action):
- """
- Open/close access to a service
-
- :param name: could be a service name defined in `/etc/services` or a port
- number.
- :param action: `open` or `close`
- """
- if action == 'open':
- subprocess.check_output(['ufw', 'allow', str(name)],
- universal_newlines=True)
- elif action == 'close':
- subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
- universal_newlines=True)
- else:
- raise UFWError(("'{}' not supported, use 'allow' "
- "or 'delete'").format(action))
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/alternatives.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/alternatives.py
deleted file mode 100644
index ef77caf..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-''' Helper for managing alternatives for file conflict resolution '''
-
-import subprocess
-import shutil
-import os
-
-
-def install_alternative(name, target, source, priority=50):
- ''' Install alternative configuration '''
- if (os.path.exists(target) and not os.path.islink(target)):
- # Move existing file/directory away before installing
- shutil.move(target, '{}.bak'.format(target))
- cmd = [
- 'update-alternatives', '--force', '--install',
- target, name, source, str(priority)
- ]
- subprocess.check_call(cmd)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 722bc64..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None, stable=True):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.openstack = openstack
- self.source = source
- self.stable = stable
- # Note(coreycb): this needs to be changed when new next branches come
- # out.
- self.current_next = "trusty"
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- # Charms outside the lp:~openstack-charmers namespace
- base_charms = ['mysql', 'mongodb', 'nrpe']
-
- # Force these charms to current series even when using an older series.
- # ie. Use trusty/nrpe even when series is precise, as the P charm
- # does not possess the necessary external master config and hooks.
- force_series_current = ['nrpe']
-
- if self.series in ['precise', 'trusty']:
- base_series = self.series
- else:
- base_series = self.current_next
-
- for svc in other_services:
- if svc['name'] in force_series_current:
- base_series = self.current_next
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if self.stable:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- if svc['name'] in base_charms:
- temp = 'lp:charms/{}/{}'
- svc['location'] = temp.format(base_series,
- svc['name'])
- else:
- temp = 'lp:~openstack-charmers/charms/{}/{}/next'
- svc['location'] = temp.format(self.current_next,
- svc['name'])
-
- return other_services
-
- def _add_services(self, this_service, other_services):
- """Add services to the deployment and set openstack-origin/source."""
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- # Charms which should use the source config option
- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw']
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
- self.precise_havana, self.precise_icehouse,
- self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
- self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
- self.wily_liberty) = range(12)
-
- releases = {
- ('precise', None): self.precise_essex,
- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
- ('precise', 'cloud:precise-havana'): self.precise_havana,
- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('utopic', None): self.utopic_juno,
- ('vivid', None): self.vivid_kilo,
- ('wily', None): self.wily_liberty}
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
- pools = [
- 'rbd',
- 'cinder',
- 'glance'
- ]
- else:
- # Juno or earlier
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index b139741..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,963 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import amulet
-import json
-import logging
-import os
-import six
-import time
-import urllib
-
-import cinderclient.v1.client as cinder_client
-import glanceclient.v1.client as glance_client
-import heatclient.v1.client as heat_client
-import keystoneclient.v2_0 as keystone_client
-import novaclient.v1_1.client as nova_client
-import pika
-import swiftclient
-
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'tenantId': act.tenantId,
- 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
- """Authenticates admin user with cinder."""
- # NOTE(beisner): cinder python client doesn't accept tokens.
- service_ip = \
- keystone_sentry.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- unit = keystone_sentry
- service_ip = unit.relation('shared-db',
- 'mysql:shared-db')['private-address']
- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return keystone_client.Client(username=user, password=password,
- tenant_name=tenant, auth_url=ep)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='adminURL')
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- endpoint_type='publicURL')
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return nova_client.Client(username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- endpoint_type='publicURL')
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Download cirros image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
-
- # Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.images.find(name=image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
-# rabbitmq/amqp specific helpers:
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not port and not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.configure('rabbitmq-server', config)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.configure('rabbitmq-server', config)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.server_properties['product'] == 'RabbitMQ'
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/context.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/context.py
deleted file mode 100644
index 1248d49..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/context.py
+++ /dev/null
@@ -1,1416 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import re
-import time
-from base64 import b64decode
-from subprocess import check_call
-
-import six
-import yaml
-
-from charmhelpers.fetch import (
- apt_install,
- filter_installed_packages,
-)
-from charmhelpers.core.hookenv import (
- config,
- is_relation_made,
- local_unit,
- log,
- relation_get,
- relation_ids,
- related_units,
- relation_set,
- unit_get,
- unit_private_ip,
- charm_name,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-
-from charmhelpers.core.sysctl import create as sysctl_create
-from charmhelpers.core.strutils import bool_from_string
-
-from charmhelpers.core.host import (
- get_bond_master,
- is_phy_iface,
- list_nics,
- get_nic_hwaddr,
- mkdir,
- write_file,
-)
-from charmhelpers.contrib.hahelpers.cluster import (
- determine_apache_port,
- determine_api_port,
- https,
- is_clustered,
-)
-from charmhelpers.contrib.hahelpers.apache import (
- get_cert,
- get_ca_cert,
- install_ca_cert,
-)
-from charmhelpers.contrib.openstack.neutron import (
- neutron_plugin_attribute,
- parse_data_port_mappings,
-)
-from charmhelpers.contrib.openstack.ip import (
- resolve_address,
- INTERNAL,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- get_ipv4_addr,
- get_ipv6_addr,
- get_netmask_for_address,
- format_ipv6_addr,
- is_address_in_network,
- is_bridge_member,
-)
-from charmhelpers.contrib.openstack.utils import get_host_ip
-CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
-ADDRESS_TYPES = ['admin', 'internal', 'public']
-
-
-class OSContextError(Exception):
- pass
-
-
-def ensure_packages(packages):
- """Install but do not upgrade required plugin packages."""
- required = filter_installed_packages(packages)
- if required:
- apt_install(required, fatal=True)
-
-
-def context_complete(ctxt):
- _missing = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- _missing.append(k)
-
- if _missing:
- log('Missing required data: %s' % ' '.join(_missing), level=INFO)
- return False
-
- return True
-
-
-def config_flags_parser(config_flags):
- """Parses config flags string into dict.
-
- This parsing method supports a few different formats for the config
- flag values to be parsed:
-
- 1. A string in the simple format of key=value pairs, with the possibility
- of specifying multiple key value pairs within the same string. For
- example, a string in the format of 'key1=value1, key2=value2' will
- return a dict of:
-
- {'key1': 'value1',
- 'key2': 'value2'}.
-
- 2. A string in the above format, but supporting a comma-delimited list
- of values for the same key. For example, a string in the format of
- 'key1=value1, key2=value3,value4,value5' will return a dict of:
-
- {'key1', 'value1',
- 'key2', 'value2,value3,value4'}
-
- 3. A string containing a colon character (:) prior to an equal
- character (=) will be treated as yaml and parsed as such. This can be
- used to specify more complex key value pairs. For example,
- a string in the format of 'key1: subkey1=value1, subkey2=value2' will
- return a dict of:
-
- {'key1', 'subkey1=value1, subkey2=value2'}
-
- The provided config_flags string may be a list of comma-separated values
- which themselves may be comma-separated list of values.
- """
- # If we find a colon before an equals sign then treat it as yaml.
- # Note: limit it to finding the colon first since this indicates assignment
- # for inline yaml.
- colon = config_flags.find(':')
- equals = config_flags.find('=')
- if colon > 0:
- if colon < equals or equals < 0:
- return yaml.safe_load(config_flags)
-
- if config_flags.find('==') >= 0:
- log("config_flags is not in expected format (key=value)", level=ERROR)
- raise OSContextError
-
- # strip the following from each value.
- post_strippers = ' ,'
- # we strip any leading/trailing '=' or ' ' from the string then
- # split on '='.
- split = config_flags.strip(' =').split('=')
- limit = len(split)
- flags = {}
- for i in range(0, limit - 1):
- current = split[i]
- next = split[i + 1]
- vindex = next.rfind(',')
- if (i == limit - 2) or (vindex < 0):
- value = next
- else:
- value = next[:vindex]
-
- if i == 0:
- key = current
- else:
- # if this not the first entry, expect an embedded key.
- index = current.rfind(',')
- if index < 0:
- log("Invalid config value(s) at index %s" % (i), level=ERROR)
- raise OSContextError
- key = current[index + 1:]
-
- # Add to collection.
- flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
- return flags
-
-
-class OSContextGenerator(object):
- """Base class for all context generators."""
- interfaces = []
- related = False
- complete = False
- missing_data = []
-
- def __call__(self):
- raise NotImplementedError
-
- def context_complete(self, ctxt):
- """Check for missing data for the required context data.
- Set self.missing_data if it exists and return False.
- Set self.complete if no missing data and return True.
- """
- # Fresh start
- self.complete = False
- self.missing_data = []
- for k, v in six.iteritems(ctxt):
- if v is None or v == '':
- if k not in self.missing_data:
- self.missing_data.append(k)
-
- if self.missing_data:
- self.complete = False
- log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
- else:
- self.complete = True
- return self.complete
-
- def get_related(self):
- """Check if any of the context interfaces have relation ids.
- Set self.related and return True if one of the interfaces
- has relation ids.
- """
- # Fresh start
- self.related = False
- try:
- for interface in self.interfaces:
- if relation_ids(interface):
- self.related = True
- return self.related
- except AttributeError as e:
- log("{} {}"
- "".format(self, e), 'INFO')
- return self.related
-
-
-class SharedDBContext(OSContextGenerator):
- interfaces = ['shared-db']
-
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
- """Allows inspecting relation for settings prefixed with
- relation_prefix. This is useful for parsing access for multiple
- databases returned via the shared-db interface (eg, nova_password,
- quantum_password)
- """
- self.relation_prefix = relation_prefix
- self.database = database
- self.user = user
- self.ssl_dir = ssl_dir
- self.rel_name = self.interfaces[0]
-
- def __call__(self):
- self.database = self.database or config('database')
- self.user = self.user or config('database-user')
- if None in [self.database, self.user]:
- log("Could not generate shared_db context. Missing required charm "
- "config options. (database name and user)", level=ERROR)
- raise OSContextError
-
- ctxt = {}
-
- # NOTE(jamespage) if mysql charm provides a network upon which
- # access to the database should be made, reconfigure relation
- # with the service units local address and defer execution
- access_network = relation_get('access-network')
- if access_network is not None:
- if self.relation_prefix is not None:
- hostname_key = "{}_hostname".format(self.relation_prefix)
- else:
- hostname_key = "hostname"
- access_hostname = get_address_in_network(access_network,
- unit_get('private-address'))
- set_hostname = relation_get(attribute=hostname_key,
- unit=local_unit())
- if set_hostname != access_hostname:
- relation_set(relation_settings={hostname_key: access_hostname})
- return None # Defer any further hook execution for now....
-
- password_setting = 'password'
- if self.relation_prefix:
- password_setting = self.relation_prefix + '_password'
-
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- host = rdata.get('db_host')
- host = format_ipv6_addr(host) or host
- ctxt = {
- 'database_host': host,
- 'database': self.database,
- 'database_user': self.user,
- 'database_password': rdata.get(password_setting),
- 'database_type': 'mysql'
- }
- if self.context_complete(ctxt):
- db_ssl(rdata, ctxt, self.ssl_dir)
- return ctxt
- return {}
-
-
-class PostgresqlDBContext(OSContextGenerator):
- interfaces = ['pgsql-db']
-
- def __init__(self, database=None):
- self.database = database
-
- def __call__(self):
- self.database = self.database or config('database')
- if self.database is None:
- log('Could not generate postgresql_db context. Missing required '
- 'charm config options. (database name)', level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.interfaces[0]):
- self.related = True
- for unit in related_units(rid):
- rel_host = relation_get('host', rid=rid, unit=unit)
- rel_user = relation_get('user', rid=rid, unit=unit)
- rel_passwd = relation_get('password', rid=rid, unit=unit)
- ctxt = {'database_host': rel_host,
- 'database': self.database,
- 'database_user': rel_user,
- 'database_password': rel_passwd,
- 'database_type': 'postgresql'}
- if self.context_complete(ctxt):
- return ctxt
-
- return {}
-
-
-def db_ssl(rdata, ctxt, ssl_dir):
- if 'ssl_ca' in rdata and ssl_dir:
- ca_path = os.path.join(ssl_dir, 'db-client.ca')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_ca']))
-
- ctxt['database_ssl_ca'] = ca_path
- elif 'ssl_ca' in rdata:
- log("Charm not setup for ssl support but ssl ca found", level=INFO)
- return ctxt
-
- if 'ssl_cert' in rdata:
- cert_path = os.path.join(
- ssl_dir, 'db-client.cert')
- if not os.path.exists(cert_path):
- log("Waiting 1m for ssl client cert validity", level=INFO)
- time.sleep(60)
-
- with open(cert_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_cert']))
-
- ctxt['database_ssl_cert'] = cert_path
- key_path = os.path.join(ssl_dir, 'db-client.key')
- with open(key_path, 'w') as fh:
- fh.write(b64decode(rdata['ssl_key']))
-
- ctxt['database_ssl_key'] = key_path
-
- return ctxt
-
-
-class IdentityServiceContext(OSContextGenerator):
-
- def __init__(self, service=None, service_user=None, rel_name='identity-service'):
- self.service = service
- self.service_user = service_user
- self.rel_name = rel_name
- self.interfaces = [self.rel_name]
-
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
- if self.service and self.service_user:
- # This is required for pki token signing if we don't want /tmp to
- # be used.
- cachedir = '/var/cache/%s' % (self.service)
- if not os.path.isdir(cachedir):
- log("Creating service cache dir %s" % (cachedir), level=DEBUG)
- mkdir(path=cachedir, owner=self.service_user,
- group=self.service_user, perms=0o700)
-
- ctxt['signing_dir'] = cachedir
-
- for rid in relation_ids(self.rel_name):
- self.related = True
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- serv_host = rdata.get('service_host')
- serv_host = format_ipv6_addr(serv_host) or serv_host
- auth_host = rdata.get('auth_host')
- auth_host = format_ipv6_addr(auth_host) or auth_host
- svc_protocol = rdata.get('service_protocol') or 'http'
- auth_protocol = rdata.get('auth_protocol') or 'http'
- ctxt.update({'service_port': rdata.get('service_port'),
- 'service_host': serv_host,
- 'auth_host': auth_host,
- 'auth_port': rdata.get('auth_port'),
- 'admin_tenant_name': rdata.get('service_tenant'),
- 'admin_user': rdata.get('service_username'),
- 'admin_password': rdata.get('service_password'),
- 'service_protocol': svc_protocol,
- 'auth_protocol': auth_protocol})
-
- if self.context_complete(ctxt):
- # NOTE(jamespage) this is required for >= icehouse
- # so a missing value just indicates keystone needs
- # upgrading
- ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
- return ctxt
-
- return {}
-
-
-class AMQPContext(OSContextGenerator):
-
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
- self.ssl_dir = ssl_dir
- self.rel_name = rel_name
- self.relation_prefix = relation_prefix
- self.interfaces = [rel_name]
-
- def __call__(self):
- log('Generating template context for amqp', level=DEBUG)
- conf = config()
- if self.relation_prefix:
- user_setting = '%s-rabbit-user' % (self.relation_prefix)
- vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
- else:
- user_setting = 'rabbit-user'
- vhost_setting = 'rabbit-vhost'
-
- try:
- username = conf[user_setting]
- vhost = conf[vhost_setting]
- except KeyError as e:
- log('Could not generate shared_db context. Missing required charm '
- 'config options: %s.' % e, level=ERROR)
- raise OSContextError
-
- ctxt = {}
- for rid in relation_ids(self.rel_name):
- ha_vip_only = False
- self.related = True
- for unit in related_units(rid):
- if relation_get('clustered', rid=rid, unit=unit):
- ctxt['clustered'] = True
- vip = relation_get('vip', rid=rid, unit=unit)
- vip = format_ipv6_addr(vip) or vip
- ctxt['rabbitmq_host'] = vip
- else:
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- ctxt['rabbitmq_host'] = host
-
- ctxt.update({
- 'rabbitmq_user': username,
- 'rabbitmq_password': relation_get('password', rid=rid,
- unit=unit),
- 'rabbitmq_virtual_host': vhost,
- })
-
- ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
- if ssl_port:
- ctxt['rabbit_ssl_port'] = ssl_port
-
- ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
- if ssl_ca:
- ctxt['rabbit_ssl_ca'] = ssl_ca
-
- if relation_get('ha_queues', rid=rid, unit=unit) is not None:
- ctxt['rabbitmq_ha_queues'] = True
-
- ha_vip_only = relation_get('ha-vip-only',
- rid=rid, unit=unit) is not None
-
- if self.context_complete(ctxt):
- if 'rabbit_ssl_ca' in ctxt:
- if not self.ssl_dir:
- log("Charm not setup for ssl support but ssl ca "
- "found", level=INFO)
- break
-
- ca_path = os.path.join(
- self.ssl_dir, 'rabbit-client-ca.pem')
- with open(ca_path, 'w') as fh:
- fh.write(b64decode(ctxt['rabbit_ssl_ca']))
- ctxt['rabbit_ssl_ca'] = ca_path
-
- # Sufficient information found = break out!
- break
-
- # Used for active/active rabbitmq >= grizzly
- if (('clustered' not in ctxt or ha_vip_only) and
- len(related_units(rid)) > 1):
- rabbitmq_hosts = []
- for unit in related_units(rid):
- host = relation_get('private-address', rid=rid, unit=unit)
- host = format_ipv6_addr(host) or host
- rabbitmq_hosts.append(host)
-
- ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
-
- oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
- if oslo_messaging_flags:
- ctxt['oslo_messaging_flags'] = config_flags_parser(
- oslo_messaging_flags)
-
- if not self.complete:
- return {}
-
- return ctxt
-
-
-class CephContext(OSContextGenerator):
- """Generates context for /etc/ceph/ceph.conf templates."""
- interfaces = ['ceph']
-
- def __call__(self):
- if not relation_ids('ceph'):
- return {}
-
- log('Generating template context for ceph', level=DEBUG)
- mon_hosts = []
- ctxt = {
- 'use_syslog': str(config('use-syslog')).lower()
- }
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- if not ctxt.get('auth'):
- ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
- if not ctxt.get('key'):
- ctxt['key'] = relation_get('key', rid=rid, unit=unit)
- ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
- unit=unit)
- unit_priv_addr = relation_get('private-address', rid=rid,
- unit=unit)
- ceph_addr = ceph_pub_addr or unit_priv_addr
- ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
- mon_hosts.append(ceph_addr)
-
- ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
-
- if not os.path.isdir('/etc/ceph'):
- os.mkdir('/etc/ceph')
-
- if not self.context_complete(ctxt):
- return {}
-
- ensure_packages(['ceph-common'])
- return ctxt
-
-
-class HAProxyContext(OSContextGenerator):
- """Provides half a context for the haproxy template, which describes
- all peers to be included in the cluster. Each charm needs to include
- its own context generator that describes the port mapping.
- """
- interfaces = ['cluster']
-
- def __init__(self, singlenode_mode=False):
- self.singlenode_mode = singlenode_mode
-
- def __call__(self):
- if not relation_ids('cluster') and not self.singlenode_mode:
- return {}
-
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
- l_unit = local_unit().replace('/', '-')
- cluster_hosts = {}
-
- # NOTE(jamespage): build out map of configured network endpoints
- # and associated backends
- for addr_type in ADDRESS_TYPES:
- cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
- if laddr:
- netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('{}-address'.format(addr_type),
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[laddr]['backends'][_unit] = _laddr
-
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
- # match in the frontend
- cluster_hosts[addr] = {}
- netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
- for rid in relation_ids('cluster'):
- for unit in related_units(rid):
- _laddr = relation_get('private-address',
- rid=rid, unit=unit)
- if _laddr:
- _unit = unit.replace('/', '-')
- cluster_hosts[addr]['backends'][_unit] = _laddr
-
- ctxt = {
- 'frontends': cluster_hosts,
- 'default_backend': addr
- }
-
- if config('haproxy-server-timeout'):
- ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
-
- if config('haproxy-client-timeout'):
- ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
-
- if config('prefer-ipv6'):
- ctxt['ipv6'] = True
- ctxt['local_host'] = 'ip6-localhost'
- ctxt['haproxy_host'] = '::'
- ctxt['stat_port'] = ':::8888'
- else:
- ctxt['local_host'] = '127.0.0.1'
- ctxt['haproxy_host'] = '0.0.0.0'
- ctxt['stat_port'] = ':8888'
-
- for frontend in cluster_hosts:
- if (len(cluster_hosts[frontend]['backends']) > 1 or
- self.singlenode_mode):
- # Enable haproxy when we have enough peers.
- log('Ensuring haproxy enabled in /etc/default/haproxy.',
- level=DEBUG)
- with open('/etc/default/haproxy', 'w') as out:
- out.write('ENABLED=1\n')
-
- return ctxt
-
- log('HAProxy context is incomplete, this unit has no peers.',
- level=INFO)
- return {}
-
-
-class ImageServiceContext(OSContextGenerator):
- interfaces = ['image-service']
-
- def __call__(self):
- """Obtains the glance API server from the image-service relation.
- Useful in nova and cinder (currently).
- """
- log('Generating template context for image-service.', level=DEBUG)
- rids = relation_ids('image-service')
- if not rids:
- return {}
-
- for rid in rids:
- for unit in related_units(rid):
- api_server = relation_get('glance-api-server',
- rid=rid, unit=unit)
- if api_server:
- return {'glance_api_servers': api_server}
-
- log("ImageService context is incomplete. Missing required relation "
- "data.", level=INFO)
- return {}
-
-
-class ApacheSSLContext(OSContextGenerator):
- """Generates a context for an apache vhost configuration that configures
- HTTPS reverse proxying for one or many endpoints. Generated context
- looks something like::
-
- {
- 'namespace': 'cinder',
- 'private_address': 'iscsi.mycinderhost.com',
- 'endpoints': [(8776, 8766), (8777, 8767)]
- }
-
- The endpoints list consists of a tuples mapping external ports
- to internal ports.
- """
- interfaces = ['https']
-
- # charms should inherit this context and set external ports
- # and service namespace accordingly.
- external_ports = []
- service_namespace = None
-
- def enable_modules(self):
- cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
- check_call(cmd)
-
- def configure_cert(self, cn=None):
- ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
- mkdir(path=ssl_dir)
- cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
-
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert))
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key))
-
- def configure_ca(self):
- ca_cert = get_ca_cert()
- if ca_cert:
- install_ca_cert(b64decode(ca_cert))
-
- def canonical_names(self):
- """Figure out which canonical names clients will access this service.
- """
- cns = []
- for r_id in relation_ids('identity-service'):
- for unit in related_units(r_id):
- rdata = relation_get(rid=r_id, unit=unit)
- for k in rdata:
- if k.startswith('ssl_key_'):
- cns.append(k.lstrip('ssl_key_'))
-
- return sorted(list(set(cns)))
-
- def get_network_addresses(self):
- """For each network configured, return corresponding address and vip
- (if available).
-
- Returns a list of tuples of the form:
-
- [(address_in_net_a, vip_in_net_a),
- (address_in_net_b, vip_in_net_b),
- ...]
-
- or, if no vip(s) available:
-
- [(address_in_net_a, address_in_net_a),
- (address_in_net_b, address_in_net_b),
- ...]
- """
- addresses = []
- if config('vip'):
- vips = config('vip').split()
- else:
- vips = []
-
- for net_type in ['os-internal-network', 'os-admin-network',
- 'os-public-network']:
- addr = get_address_in_network(config(net_type),
- unit_get('private-address'))
- if len(vips) > 1 and is_clustered():
- if not config(net_type):
- log("Multiple networks configured but net_type "
- "is None (%s)." % net_type, level=WARNING)
- continue
-
- for vip in vips:
- if is_address_in_network(config(net_type), vip):
- addresses.append((addr, vip))
- break
-
- elif is_clustered() and config('vip'):
- addresses.append((addr, config('vip')))
- else:
- addresses.append((addr, addr))
-
- return sorted(addresses)
-
- def __call__(self):
- if isinstance(self.external_ports, six.string_types):
- self.external_ports = [self.external_ports]
-
- if not self.external_ports or not https():
- return {}
-
- self.configure_ca()
- self.enable_modules()
-
- ctxt = {'namespace': self.service_namespace,
- 'endpoints': [],
- 'ext_ports': []}
-
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
-
- addresses = self.get_network_addresses()
- for address, endpoint in sorted(set(addresses)):
- for api_port in self.external_ports:
- ext_port = determine_apache_port(api_port,
- singlenode_mode=True)
- int_port = determine_api_port(api_port, singlenode_mode=True)
- portmap = (address, endpoint, int(ext_port), int(int_port))
- ctxt['endpoints'].append(portmap)
- ctxt['ext_ports'].append(int(ext_port))
-
- ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
- return ctxt
-
-
-class NeutronContext(OSContextGenerator):
- interfaces = []
-
- @property
- def plugin(self):
- return None
-
- @property
- def network_manager(self):
- return None
-
- @property
- def packages(self):
- return neutron_plugin_attribute(self.plugin, 'packages',
- self.network_manager)
-
- @property
- def neutron_security_groups(self):
- return None
-
- def _ensure_packages(self):
- for pkgs in self.packages:
- ensure_packages(pkgs)
-
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
- def ovs_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'ovs',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return ovs_ctxt
-
- def nuage_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nuage_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'vsp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nuage_ctxt
-
- def nvp_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- nvp_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'nvp',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return nvp_ctxt
-
- def n1kv_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- n1kv_user_config_flags = config('n1kv-config-flags')
- restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
- n1kv_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'n1kv',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': n1kv_config,
- 'vsm_ip': config('n1kv-vsm-ip'),
- 'vsm_username': config('n1kv-vsm-username'),
- 'vsm_password': config('n1kv-vsm-password'),
- 'restrict_policy_profiles': restrict_policy_profiles}
-
- if n1kv_user_config_flags:
- flags = config_flags_parser(n1kv_user_config_flags)
- n1kv_ctxt['user_config_flags'] = flags
-
- return n1kv_ctxt
-
- def calico_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- calico_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'Calico',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
-
- return calico_ctxt
-
- def neutron_ctxt(self):
- if https():
- proto = 'https'
- else:
- proto = 'http'
-
- if is_clustered():
- host = config('vip')
- else:
- host = unit_get('private-address')
-
- ctxt = {'network_manager': self.network_manager,
- 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
- return ctxt
-
- def pg_ctxt(self):
- driver = neutron_plugin_attribute(self.plugin, 'driver',
- self.network_manager)
- config = neutron_plugin_attribute(self.plugin, 'config',
- self.network_manager)
- ovs_ctxt = {'core_plugin': driver,
- 'neutron_plugin': 'plumgrid',
- 'neutron_security_groups': self.neutron_security_groups,
- 'local_ip': unit_private_ip(),
- 'config': config}
- return ovs_ctxt
-
- def __call__(self):
- if self.network_manager not in ['quantum', 'neutron']:
- return {}
-
- if not self.plugin:
- return {}
-
- ctxt = self.neutron_ctxt()
-
- if self.plugin == 'ovs':
- ctxt.update(self.ovs_ctxt())
- elif self.plugin in ['nvp', 'nsx']:
- ctxt.update(self.nvp_ctxt())
- elif self.plugin == 'n1kv':
- ctxt.update(self.n1kv_ctxt())
- elif self.plugin == 'Calico':
- ctxt.update(self.calico_ctxt())
- elif self.plugin == 'vsp':
- ctxt.update(self.nuage_ctxt())
- elif self.plugin == 'plumgrid':
- ctxt.update(self.pg_ctxt())
-
- alchemy_flags = config('neutron-alchemy-flags')
- if alchemy_flags:
- flags = config_flags_parser(alchemy_flags)
- ctxt['neutron_alchemy_flags'] = flags
-
- self._save_flag_file()
- return ctxt
-
-
-class NeutronPortContext(OSContextGenerator):
-
- def resolve_ports(self, ports):
- """Resolve NICs not yet bound to bridge(s)
-
- If hwaddress provided then returns resolved hwaddress otherwise NIC.
- """
- if not ports:
- return None
-
- hwaddr_to_nic = {}
- hwaddr_to_ip = {}
- for nic in list_nics():
- # Ignore virtual interfaces (bond masters will be identified from
- # their slaves)
- if not is_phy_iface(nic):
- continue
-
- _nic = get_bond_master(nic)
- if _nic:
- log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
- level=DEBUG)
- nic = _nic
-
- hwaddr = get_nic_hwaddr(nic)
- hwaddr_to_nic[hwaddr] = nic
- addresses = get_ipv4_addr(nic, fatal=False)
- addresses += get_ipv6_addr(iface=nic, fatal=False)
- hwaddr_to_ip[hwaddr] = addresses
-
- resolved = []
- mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
- for entry in ports:
- if re.match(mac_regex, entry):
- # NIC is in known NICs and does NOT hace an IP address
- if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
- # If the nic is part of a bridge then don't use it
- if is_bridge_member(hwaddr_to_nic[entry]):
- continue
-
- # Entry is a MAC address for a valid interface that doesn't
- # have an IP address assigned yet.
- resolved.append(hwaddr_to_nic[entry])
- else:
- # If the passed entry is not a MAC address, assume it's a valid
- # interface, and that the user put it there on purpose (we can
- # trust it to be the real external network).
- resolved.append(entry)
-
- # Ensure no duplicates
- return list(set(resolved))
-
-
-class OSConfigFlagContext(OSContextGenerator):
- """Provides support for user-defined config flags.
-
- Users can define a comma-seperated list of key=value pairs
- in the charm configuration and apply them at any point in
- any file by using a template flag.
-
- Sometimes users might want config flags inserted within a
- specific section so this class allows users to specify the
- template flag name, allowing for multiple template flags
- (sections) within the same context.
-
- NOTE: the value of config-flags may be a comma-separated list of
- key=value pairs and some Openstack config files support
- comma-separated lists as values.
- """
-
- def __init__(self, charm_flag='config-flags',
- template_flag='user_config_flags'):
- """
- :param charm_flag: config flags in charm configuration.
- :param template_flag: insert point for user-defined flags in template
- file.
- """
- super(OSConfigFlagContext, self).__init__()
- self._charm_flag = charm_flag
- self._template_flag = template_flag
-
- def __call__(self):
- config_flags = config(self._charm_flag)
- if not config_flags:
- return {}
-
- return {self._template_flag:
- config_flags_parser(config_flags)}
-
-
-class SubordinateConfigContext(OSContextGenerator):
-
- """
- Responsible for inspecting relations to subordinates that
- may be exporting required config via a json blob.
-
- The subordinate interface allows subordinates to export their
- configuration requirements to the principle for multiple config
- files and multiple serivces. Ie, a subordinate that has interfaces
- to both glance and nova may export to following yaml blob as json::
-
- glance:
- /etc/glance/glance-api.conf:
- sections:
- DEFAULT:
- - [key1, value1]
- /etc/glance/glance-registry.conf:
- MYSECTION:
- - [key2, value2]
- nova:
- /etc/nova/nova.conf:
- sections:
- DEFAULT:
- - [key3, value3]
-
-
- It is then up to the principle charms to subscribe this context to
- the service+config file it is interestd in. Configuration data will
- be available in the template context, in glance's case, as::
-
- ctxt = {
- ... other context ...
- 'subordinate_config': {
- 'DEFAULT': {
- 'key1': 'value1',
- },
- 'MYSECTION': {
- 'key2': 'value2',
- },
- }
- }
- """
-
- def __init__(self, service, config_file, interface):
- """
- :param service : Service name key to query in any subordinate
- data found
- :param config_file : Service's config file to query sections
- :param interface : Subordinate interface to inspect
- """
- self.config_file = config_file
- if isinstance(service, list):
- self.services = service
- else:
- self.services = [service]
- if isinstance(interface, list):
- self.interfaces = interface
- else:
- self.interfaces = [interface]
-
- def __call__(self):
- ctxt = {'sections': {}}
- rids = []
- for interface in self.interfaces:
- rids.extend(relation_ids(interface))
- for rid in rids:
- for unit in related_units(rid):
- sub_config = relation_get('subordinate_configuration',
- rid=rid, unit=unit)
- if sub_config and sub_config != '':
- try:
- sub_config = json.loads(sub_config)
- except:
- log('Could not parse JSON from subordinate_config '
- 'setting from %s' % rid, level=ERROR)
- continue
-
- for service in self.services:
- if service not in sub_config:
- log('Found subordinate_config on %s but it contained'
- 'nothing for %s service' % (rid, service),
- level=INFO)
- continue
-
- sub_config = sub_config[service]
- if self.config_file not in sub_config:
- log('Found subordinate_config on %s but it contained'
- 'nothing for %s' % (rid, self.config_file),
- level=INFO)
- continue
-
- sub_config = sub_config[self.config_file]
- for k, v in six.iteritems(sub_config):
- if k == 'sections':
- for section, config_list in six.iteritems(v):
- log("adding section '%s'" % (section),
- level=DEBUG)
- if ctxt[k].get(section):
- ctxt[k][section].extend(config_list)
- else:
- ctxt[k][section] = config_list
- else:
- ctxt[k] = v
- log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
- return ctxt
-
-
-class LogLevelContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {}
- ctxt['debug'] = \
- False if config('debug') is None else config('debug')
- ctxt['verbose'] = \
- False if config('verbose') is None else config('verbose')
-
- return ctxt
-
-
-class SyslogContext(OSContextGenerator):
-
- def __call__(self):
- ctxt = {'use_syslog': config('use-syslog')}
- return ctxt
-
-
-class BindHostContext(OSContextGenerator):
-
- def __call__(self):
- if config('prefer-ipv6'):
- return {'bind_host': '::'}
- else:
- return {'bind_host': '0.0.0.0'}
-
-
-class WorkerConfigContext(OSContextGenerator):
-
- @property
- def num_cpus(self):
- try:
- from psutil import NUM_CPUS
- except ImportError:
- apt_install('python-psutil', fatal=True)
- from psutil import NUM_CPUS
-
- return NUM_CPUS
-
- def __call__(self):
- multiplier = config('worker-multiplier') or 0
- ctxt = {"workers": self.num_cpus * multiplier}
- return ctxt
-
-
-class ZeroMQContext(OSContextGenerator):
- interfaces = ['zeromq-configuration']
-
- def __call__(self):
- ctxt = {}
- if is_relation_made('zeromq-configuration', 'host'):
- for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
-
- return ctxt
-
-
-class NotificationDriverContext(OSContextGenerator):
-
- def __init__(self, zmq_relation='zeromq-configuration',
- amqp_relation='amqp'):
- """
- :param zmq_relation: Name of Zeromq relation to check
- """
- self.zmq_relation = zmq_relation
- self.amqp_relation = amqp_relation
-
- def __call__(self):
- ctxt = {'notifications': 'False'}
- if is_relation_made(self.amqp_relation):
- ctxt['notifications'] = "True"
-
- return ctxt
-
-
-class SysctlContext(OSContextGenerator):
- """This context check if the 'sysctl' option exists on configuration
- then creates a file with the loaded contents"""
- def __call__(self):
- sysctl_dict = config('sysctl')
- if sysctl_dict:
- sysctl_create(sysctl_dict,
- '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
- return {'sysctl': sysctl_dict}
-
-
-class NeutronAPIContext(OSContextGenerator):
- '''
- Inspects current neutron-plugin-api relation for neutron settings. Return
- defaults if it is not present.
- '''
- interfaces = ['neutron-plugin-api']
-
- def __call__(self):
- self.neutron_defaults = {
- 'l2_population': {
- 'rel_key': 'l2-population',
- 'default': False,
- },
- 'overlay_network_type': {
- 'rel_key': 'overlay-network-type',
- 'default': 'gre',
- },
- 'neutron_security_groups': {
- 'rel_key': 'neutron-security-groups',
- 'default': False,
- },
- 'network_device_mtu': {
- 'rel_key': 'network-device-mtu',
- 'default': None,
- },
- 'enable_dvr': {
- 'rel_key': 'enable-dvr',
- 'default': False,
- },
- 'enable_l3ha': {
- 'rel_key': 'enable-l3ha',
- 'default': False,
- },
- }
- ctxt = self.get_neutron_options({})
- for rid in relation_ids('neutron-plugin-api'):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if 'l2-population' in rdata:
- ctxt.update(self.get_neutron_options(rdata))
-
- return ctxt
-
- def get_neutron_options(self, rdata):
- settings = {}
- for nkey in self.neutron_defaults.keys():
- defv = self.neutron_defaults[nkey]['default']
- rkey = self.neutron_defaults[nkey]['rel_key']
- if rkey in rdata.keys():
- if type(defv) is bool:
- settings[nkey] = bool_from_string(rdata[rkey])
- else:
- settings[nkey] = rdata[rkey]
- else:
- settings[nkey] = defv
- return settings
-
-
-class ExternalPortContext(NeutronPortContext):
-
- def __call__(self):
- ctxt = {}
- ports = config('ext-port')
- if ports:
- ports = [p.strip() for p in ports.split()]
- ports = self.resolve_ports(ports)
- if ports:
- ctxt = {"ext_port": ports[0]}
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt['ext_port_mtu'] = mtu
-
- return ctxt
-
-
-class DataPortContext(NeutronPortContext):
-
- def __call__(self):
- ports = config('data-port')
- if ports:
- # Map of {port/mac:bridge}
- portmap = parse_data_port_mappings(ports)
- ports = portmap.keys()
- # Resolve provided ports or mac addresses and filter out those
- # already attached to a bridge.
- resolved = self.resolve_ports(ports)
- # FIXME: is this necessary?
- normalized = {get_nic_hwaddr(port): port for port in resolved
- if port not in ports}
- normalized.update({port: port for port in resolved
- if port in ports})
- if resolved:
- return {bridge: normalized[port] for port, bridge in
- six.iteritems(portmap) if port in normalized.keys()}
-
- return None
-
-
-class PhyNICMTUContext(DataPortContext):
-
- def __call__(self):
- ctxt = {}
- mappings = super(PhyNICMTUContext, self).__call__()
- if mappings and mappings.values():
- ports = mappings.values()
- napi_settings = NeutronAPIContext()()
- mtu = napi_settings.get('network_device_mtu')
- if mtu:
- ctxt["devs"] = '\\n'.join(ports)
- ctxt['mtu'] = mtu
-
- return ctxt
-
-
-class NetworkServiceContext(OSContextGenerator):
-
- def __init__(self, rel_name='quantum-network-service'):
- self.rel_name = rel_name
- self.interfaces = [rel_name]
-
- def __call__(self):
- for rid in relation_ids(self.rel_name):
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- ctxt = {
- 'keystone_host': rdata.get('keystone_host'),
- 'service_port': rdata.get('service_port'),
- 'auth_port': rdata.get('auth_port'),
- 'service_tenant': rdata.get('service_tenant'),
- 'service_username': rdata.get('service_username'),
- 'service_password': rdata.get('service_password'),
- 'quantum_host': rdata.get('quantum_host'),
- 'quantum_port': rdata.get('quantum_port'),
- 'quantum_url': rdata.get('quantum_url'),
- 'region': rdata.get('region'),
- 'service_protocol':
- rdata.get('service_protocol') or 'http',
- 'auth_protocol':
- rdata.get('auth_protocol') or 'http',
- }
- if self.context_complete(ctxt):
- return ctxt
- return {}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
deleted file mode 100755
index eb8527f..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-CRITICAL=0
-NOTACTIVE=''
-LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
-do
- output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
- if [ $? != 0 ]; then
- date >> $LOGFILE
- echo $output >> $LOGFILE
- /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
- CRITICAL=1
- NOTACTIVE="${NOTACTIVE} $appserver"
- fi
-done
-
-if [ $CRITICAL = 1 ]; then
- echo "CRITICAL:${NOTACTIVE}"
- exit 2
-fi
-
-echo "OK: All haproxy instances looking good"
-exit 0
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
deleted file mode 100755
index 3ebb532..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-#--------------------------------------------
-# This file is managed by Juju
-#--------------------------------------------
-#
-# Copyright 2009,2012 Canonical Ltd.
-# Author: Tom Haddon
-
-# These should be config options at some stage
-CURRQthrsh=0
-MAXQthrsh=100
-
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
-
-HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
-
-for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
-do
- CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
- MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
-
- if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
- echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
- exit 2
- fi
-done
-
-echo "OK: All haproxy queue depths looking good"
-exit 0
-
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/ip.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/ip.py
deleted file mode 100644
index 3dca6dc..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/ip.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from charmhelpers.core.hookenv import (
- config,
- unit_get,
- service_name,
-)
-from charmhelpers.contrib.network.ip import (
- get_address_in_network,
- is_address_in_network,
- is_ipv6,
- get_ipv6_addr,
-)
-from charmhelpers.contrib.hahelpers.cluster import is_clustered
-
-PUBLIC = 'public'
-INTERNAL = 'int'
-ADMIN = 'admin'
-
-ADDRESS_MAP = {
- PUBLIC: {
- 'config': 'os-public-network',
- 'fallback': 'public-address',
- 'override': 'os-public-hostname',
- },
- INTERNAL: {
- 'config': 'os-internal-network',
- 'fallback': 'private-address',
- 'override': 'os-internal-hostname',
- },
- ADMIN: {
- 'config': 'os-admin-network',
- 'fallback': 'private-address',
- 'override': 'os-admin-hostname',
- }
-}
-
-
-def canonical_url(configs, endpoint_type=PUBLIC):
- """Returns the correct HTTP URL to this host given the state of HTTPS
- configuration, hacluster and charm configuration.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :param endpoint_type: str endpoint type to resolve.
- :param returns: str base URL for services on the current service unit.
- """
- scheme = _get_scheme(configs)
-
- address = resolve_address(endpoint_type)
- if is_ipv6(address):
- address = "[{}]".format(address)
-
- return '%s://%s' % (scheme, address)
-
-
-def _get_scheme(configs):
- """Returns the scheme to use for the url (either http or https)
- depending upon whether https is in the configs value.
-
- :param configs: OSTemplateRenderer config templating object to inspect
- for a complete https context.
- :returns: either 'http' or 'https' depending on whether https is
- configured within the configs context.
- """
- scheme = 'http'
- if configs and 'https' in configs.complete_contexts():
- scheme = 'https'
- return scheme
-
-
-def _get_address_override(endpoint_type=PUBLIC):
- """Returns any address overrides that the user has defined based on the
- endpoint type.
-
- Note: this function allows for the service name to be inserted into the
- address if the user specifies {service_name}.somehost.org.
-
- :param endpoint_type: the type of endpoint to retrieve the override
- value for.
- :returns: any endpoint address or hostname that the user has overridden
- or None if an override is not present.
- """
- override_key = ADDRESS_MAP[endpoint_type]['override']
- addr_override = config(override_key)
- if not addr_override:
- return None
- else:
- return addr_override.format(service_name=service_name())
-
-
-def resolve_address(endpoint_type=PUBLIC):
- """Return unit address depending on net config.
-
- If unit is clustered with vip(s) and has net splits defined, return vip on
- correct network. If clustered with no nets defined, return primary vip.
-
- If not clustered, return unit address ensuring address is on configured net
- split if one is configured.
-
- :param endpoint_type: Network endpoing type
- """
- resolved_address = _get_address_override(endpoint_type)
- if resolved_address:
- return resolved_address
-
- vips = config('vip')
- if vips:
- vips = vips.split()
-
- net_type = ADDRESS_MAP[endpoint_type]['config']
- net_addr = config(net_type)
- net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
- clustered = is_clustered()
- if clustered:
- if not net_addr:
- # If no net-splits defined, we expect a single vip
- resolved_address = vips[0]
- else:
- for vip in vips:
- if is_address_in_network(net_addr, vip):
- resolved_address = vip
- break
- else:
- if config('prefer-ipv6'):
- fallback_addr = get_ipv6_addr(exc_list=vips)[0]
- else:
- fallback_addr = unit_get(net_fallback)
-
- resolved_address = get_address_in_network(net_addr, fallback_addr)
-
- if resolved_address is None:
- raise ValueError("Unable to resolve a suitable IP address based on "
- "charm state and configuration. (net_type=%s, "
- "clustered=%s)" % (net_type, clustered))
-
- return resolved_address
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/neutron.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/neutron.py
deleted file mode 100644
index 55b2037..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/neutron.py
+++ /dev/null
@@ -1,356 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Various utilies for dealing with Neutron and the renaming from Quantum.
-
-import six
-from subprocess import check_output
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- ERROR,
-)
-
-from charmhelpers.contrib.openstack.utils import os_release
-
-
-def headers_package():
- """Ensures correct linux-headers for running kernel are installed,
- for building DKMS package"""
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- return 'linux-headers-%s' % kver
-
-QUANTUM_CONF_DIR = '/etc/quantum'
-
-
-def kernel_version():
- """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
- kver = check_output(['uname', '-r']).decode('UTF-8').strip()
- kver = kver.split('.')
- return (int(kver[0]), int(kver[1]))
-
-
-def determine_dkms_package():
- """ Determine which DKMS package should be used based on kernel version """
- # NOTE: 3.13 kernels have support for GRE and VXLAN native
- if kernel_version() >= (3, 13):
- return []
- else:
- return ['openvswitch-datapath-dkms']
-
-
-# legacy
-
-
-def quantum_plugins():
- from charmhelpers.contrib.openstack import context
- return {
- 'ovs': {
- 'config': '/etc/quantum/plugins/openvswitch/'
- 'ovs_quantum_plugin.ini',
- 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
- 'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': ['quantum-plugin-openvswitch-agent'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['quantum-plugin-openvswitch-agent']],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-openvswitch'],
- 'server_services': ['quantum-server']
- },
- 'nvp': {
- 'config': '/etc/quantum/plugins/nicira/nvp.ini',
- 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
- 'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['quantum-server',
- 'quantum-plugin-nicira'],
- 'server_services': ['quantum-server']
- }
- }
-
-NEUTRON_CONF_DIR = '/etc/neutron'
-
-
-def neutron_plugins():
- from charmhelpers.contrib.openstack import context
- release = os_release('nova-common')
- plugins = {
- 'ovs': {
- 'config': '/etc/neutron/plugins/openvswitch/'
- 'ovs_neutron_plugin.ini',
- 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
- 'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['neutron-plugin-openvswitch-agent'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['neutron-plugin-openvswitch-agent']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-openvswitch'],
- 'server_services': ['neutron-server']
- },
- 'nvp': {
- 'config': '/etc/neutron/plugins/nicira/nvp.ini',
- 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
- 'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-nicira'],
- 'server_services': ['neutron-server']
- },
- 'nsx': {
- 'config': '/etc/neutron/plugins/vmware/nsx.ini',
- 'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-vmware'],
- 'server_services': ['neutron-server']
- },
- 'n1kv': {
- 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
- 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['neutron-plugin-cisco']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-cisco'],
- 'server_services': ['neutron-server']
- },
- 'Calico': {
- 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
- 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': ['calico-felix',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd'],
- 'packages': [[headers_package()] + determine_dkms_package(),
- ['calico-compute',
- 'bird',
- 'neutron-dhcp-agent',
- 'nova-api-metadata',
- 'etcd']],
- 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
- 'server_services': ['neutron-server', 'etcd']
- },
- 'vsp': {
- 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
- 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [],
- 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
- 'server_services': ['neutron-server']
- },
- 'plumgrid': {
- 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
- 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
- 'services': [],
- 'packages': [['plumgrid-lxc'],
- ['iovisor-dkms']],
- 'server_packages': ['neutron-server',
- 'neutron-plugin-plumgrid'],
- 'server_services': ['neutron-server']
- }
- }
- if release >= 'icehouse':
- # NOTE: patch in ml2 plugin for icehouse onwards
- plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
- plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
- plugins['ovs']['server_packages'] = ['neutron-server',
- 'neutron-plugin-ml2']
- # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
- plugins['nvp'] = plugins['nsx']
- return plugins
-
-
-def neutron_plugin_attribute(plugin, attr, net_manager=None):
- manager = net_manager or network_manager()
- if manager == 'quantum':
- plugins = quantum_plugins()
- elif manager == 'neutron':
- plugins = neutron_plugins()
- else:
- log("Network manager '%s' does not support plugins." % (manager),
- level=ERROR)
- raise Exception
-
- try:
- _plugin = plugins[plugin]
- except KeyError:
- log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
- raise Exception
-
- try:
- return _plugin[attr]
- except KeyError:
- return None
-
-
-def network_manager():
- '''
- Deals with the renaming of Quantum to Neutron in H and any situations
- that require compatability (eg, deploying H with network-manager=quantum,
- upgrading from G).
- '''
- release = os_release('nova-common')
- manager = config('network-manager').lower()
-
- if manager not in ['quantum', 'neutron']:
- return manager
-
- if release in ['essex']:
- # E does not support neutron
- log('Neutron networking not supported in Essex.', level=ERROR)
- raise Exception
- elif release in ['folsom', 'grizzly']:
- # neutron is named quantum in F and G
- return 'quantum'
- else:
- # ensure accurate naming for all releases post-H
- return 'neutron'
-
-
-def parse_mappings(mappings, key_rvalue=False):
- """By default mappings are lvalue keyed.
-
- If key_rvalue is True, the mapping will be reversed to allow multiple
- configs for the same lvalue.
- """
- parsed = {}
- if mappings:
- mappings = mappings.split()
- for m in mappings:
- p = m.partition(':')
-
- if key_rvalue:
- key_index = 2
- val_index = 0
- # if there is no rvalue skip to next
- if not p[1]:
- continue
- else:
- key_index = 0
- val_index = 2
-
- key = p[key_index].strip()
- parsed[key] = p[val_index].strip()
-
- return parsed
-
-
-def parse_bridge_mappings(mappings):
- """Parse bridge mappings.
-
- Mappings must be a space-delimited list of provider:bridge mappings.
-
- Returns dict of the form {provider:bridge}.
- """
- return parse_mappings(mappings)
-
-
-def parse_data_port_mappings(mappings, default_bridge='br-data'):
- """Parse data port mappings.
-
- Mappings must be a space-delimited list of port:bridge mappings.
-
- Returns dict of the form {port:bridge} where port may be an mac address or
- interface name.
- """
-
- # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
- # proposed for <port> since it may be a mac address which will differ
- # across units this allowing first-known-good to be chosen.
- _mappings = parse_mappings(mappings, key_rvalue=True)
- if not _mappings or list(_mappings.values()) == ['']:
- if not mappings:
- return {}
-
- # For backwards-compatibility we need to support port-only provided in
- # config.
- _mappings = {mappings.split()[0]: default_bridge}
-
- ports = _mappings.keys()
- if len(set(ports)) != len(ports):
- raise Exception("It is not allowed to have the same port configured "
- "on more than one bridge")
-
- return _mappings
-
-
-def parse_vlan_range_mappings(mappings):
- """Parse vlan range mappings.
-
- Mappings must be a space-delimited list of provider:start:end mappings.
-
- The start:end range is optional and may be omitted.
-
- Returns dict of the form {provider: (start, end)}.
- """
- _mappings = parse_mappings(mappings)
- if not _mappings:
- return {}
-
- mappings = {}
- for p, r in six.iteritems(_mappings):
- mappings[p] = tuple(r.split(':'))
-
- return mappings
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/__init__.py
deleted file mode 100644
index 7587679..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# dummy __init__.py to fool syncer into thinking this is a syncable python
-# module
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
deleted file mode 100644
index b99851c..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-[global]
-{% if auth -%}
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-log to syslog = {{ use_syslog }}
-err to syslog = {{ use_syslog }}
-clog to syslog = {{ use_syslog }}
-
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/git.upstart
deleted file mode 100644
index 4bed404..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/git.upstart
+++ /dev/null
@@ -1,17 +0,0 @@
-description "{{ service_description }}"
-author "Juju {{ service_name }} Charm <juju@localhost>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-
-exec start-stop-daemon --start --chuid {{ user_name }} \
- --chdir {{ start_dir }} --name {{ process_name }} \
- --exec {{ executable_name }} -- \
- {% for config_file in config_files -%}
- --config-file={{ config_file }} \
- {% endfor -%}
- {% if log_file -%}
- --log-file={{ log_file }}
- {% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
deleted file mode 100644
index ad875f1..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ /dev/null
@@ -1,58 +0,0 @@
-global
- log {{ local_host }} local0
- log {{ local_host }} local1 notice
- maxconn 20000
- user haproxy
- group haproxy
- spread-checks 0
-
-defaults
- log global
- mode tcp
- option tcplog
- option dontlognull
- retries 3
- timeout queue 1000
- timeout connect 1000
-{% if haproxy_client_timeout -%}
- timeout client {{ haproxy_client_timeout }}
-{% else -%}
- timeout client 30000
-{% endif -%}
-
-{% if haproxy_server_timeout -%}
- timeout server {{ haproxy_server_timeout }}
-{% else -%}
- timeout server 30000
-{% endif -%}
-
-listen stats {{ stat_port }}
- mode http
- stats enable
- stats hide-version
- stats realm Haproxy\ Statistics
- stats uri /
- stats auth admin:password
-
-{% if frontends -%}
-{% for service, ports in service_ports.items() -%}
-frontend tcp-in_{{ service }}
- bind *:{{ ports[0] }}
- {% if ipv6 -%}
- bind :::{{ ports[0] }}
- {% endif -%}
- {% for frontend in frontends -%}
- acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
- use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
- {% endfor -%}
- default_backend {{ service }}_{{ default_backend }}
-
-{% for frontend in frontends -%}
-backend {{ service }}_{{ frontend }}
- balance leastconn
- {% for unit, address in frontends[frontend]['backends'].items() -%}
- server {{ unit }} {{ address }}:{{ ports[1] }} check
- {% endfor %}
-{% endfor -%}
-{% endfor -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
deleted file mode 100644
index ce28fa3..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
+++ /dev/null
@@ -1,24 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
deleted file mode 100644
index ce28fa3..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
+++ /dev/null
@@ -1,24 +0,0 @@
-{% if endpoints -%}
-{% for ext_port in ext_ports -%}
-Listen {{ ext_port }}
-{% endfor -%}
-{% for address, endpoint, ext, int in endpoints -%}
-<VirtualHost {{ address }}:{{ ext }}>
- ServerName {{ endpoint }}
- SSLEngine on
- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
- ProxyPass / http://localhost:{{ int }}/
- ProxyPassReverse / http://localhost:{{ int }}/
- ProxyPreserveHost on
-</VirtualHost>
-{% endfor -%}
-<Proxy *>
- Order deny,allow
- Allow from all
-</Proxy>
-<Location />
- Order allow,deny
- Allow from all
-</Location>
-{% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
deleted file mode 100644
index 2a37edd..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
+++ /dev/null
@@ -1,9 +0,0 @@
-{% if auth_host -%}
-[keystone_authtoken]
-identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
-auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-signing_dir = {{ signing_dir }}
-{% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
deleted file mode 100644
index b444c9c..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
+++ /dev/null
@@ -1,22 +0,0 @@
-{% if rabbitmq_host or rabbitmq_hosts -%}
-[oslo_messaging_rabbit]
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_password = {{ rabbitmq_password }}
-{% if rabbitmq_hosts -%}
-rabbit_hosts = {{ rabbitmq_hosts }}
-{% if rabbitmq_ha_queues -%}
-rabbit_ha_queues = True
-rabbit_durable_queues = False
-{% endif -%}
-{% else -%}
-rabbit_host = {{ rabbitmq_host }}
-{% endif -%}
-{% if rabbit_ssl_port -%}
-rabbit_use_ssl = True
-rabbit_port = {{ rabbit_ssl_port }}
-{% if rabbit_ssl_ca -%}
-kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
-{% endif -%}
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
deleted file mode 100644
index 95f1a76..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templates/section-zeromq
+++ /dev/null
@@ -1,14 +0,0 @@
-{% if zmq_host -%}
-# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
-rpc_backend = zmq
-rpc_zmq_host = {{ zmq_host }}
-{% if zmq_redis_address -%}
-rpc_zmq_matchmaker = redis
-matchmaker_heartbeat_freq = 15
-matchmaker_heartbeat_ttl = 30
-[matchmaker_redis]
-host = {{ zmq_redis_address }}
-{% else -%}
-rpc_zmq_matchmaker = ring
-{% endif -%}
-{% endif -%}
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templating.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templating.py
deleted file mode 100644
index e5e3cb1..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/templating.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-import six
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import (
- log,
- ERROR,
- INFO
-)
-from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
-
-try:
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-except ImportError:
- apt_update(fatal=True)
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
-
-
-class OSConfigException(Exception):
- pass
-
-
-def get_loader(templates_dir, os_release):
- """
- Create a jinja2.ChoiceLoader containing template dirs up to
- and including os_release. If directory template directory
- is missing at templates_dir, it will be omitted from the loader.
- templates_dir is added to the bottom of the search list as a base
- loading dir.
-
- A charm may also ship a templates dir with this module
- and it will be appended to the bottom of the search list, eg::
-
- hooks/charmhelpers/contrib/openstack/templates
-
- :param templates_dir (str): Base template directory containing release
- sub-directories.
- :param os_release (str): OpenStack release codename to construct template
- loader.
- :returns: jinja2.ChoiceLoader constructed with a list of
- jinja2.FilesystemLoaders, ordered in descending
- order by OpenStack release.
- """
- tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
- for rel in six.itervalues(OPENSTACK_CODENAMES)]
-
- if not os.path.isdir(templates_dir):
- log('Templates directory not found @ %s.' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- # the bottom contains tempaltes_dir and possibly a common templates dir
- # shipped with the helper.
- loaders = [FileSystemLoader(templates_dir)]
- helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
- if os.path.isdir(helper_templates):
- loaders.append(FileSystemLoader(helper_templates))
-
- for rel, tmpl_dir in tmpl_dirs:
- if os.path.isdir(tmpl_dir):
- loaders.insert(0, FileSystemLoader(tmpl_dir))
- if rel == os_release:
- break
- log('Creating choice loader with dirs: %s' %
- [l.searchpath for l in loaders], level=INFO)
- return ChoiceLoader(loaders)
-
-
-class OSConfigTemplate(object):
- """
- Associates a config file template with a list of context generators.
- Responsible for constructing a template context based on those generators.
- """
- def __init__(self, config_file, contexts):
- self.config_file = config_file
-
- if hasattr(contexts, '__call__'):
- self.contexts = [contexts]
- else:
- self.contexts = contexts
-
- self._complete_contexts = []
-
- def context(self):
- ctxt = {}
- for context in self.contexts:
- _ctxt = context()
- if _ctxt:
- ctxt.update(_ctxt)
- # track interfaces for every complete context.
- [self._complete_contexts.append(interface)
- for interface in context.interfaces
- if interface not in self._complete_contexts]
- return ctxt
-
- def complete_contexts(self):
- '''
- Return a list of interfaces that have satisfied contexts.
- '''
- if self._complete_contexts:
- return self._complete_contexts
- self.context()
- return self._complete_contexts
-
-
-class OSConfigRenderer(object):
- """
- This class provides a common templating system to be used by OpenStack
- charms. It is intended to help charms share common code and templates,
- and ease the burden of managing config templates across multiple OpenStack
- releases.
-
- Basic usage::
-
- # import some common context generates from charmhelpers
- from charmhelpers.contrib.openstack import context
-
- # Create a renderer object for a specific OS release.
- configs = OSConfigRenderer(templates_dir='/tmp/templates',
- openstack_release='folsom')
- # register some config files with context generators.
- configs.register(config_file='/etc/nova/nova.conf',
- contexts=[context.SharedDBContext(),
- context.AMQPContext()])
- configs.register(config_file='/etc/nova/api-paste.ini',
- contexts=[context.IdentityServiceContext()])
- configs.register(config_file='/etc/haproxy/haproxy.conf',
- contexts=[context.HAProxyContext()])
- # write out a single config
- configs.write('/etc/nova/nova.conf')
- # write out all registered configs
- configs.write_all()
-
- **OpenStack Releases and template loading**
-
- When the object is instantiated, it is associated with a specific OS
- release. This dictates how the template loader will be constructed.
-
- The constructed loader attempts to load the template from several places
- in the following order:
- - from the most recent OS release-specific template dir (if one exists)
- - the base templates_dir
- - a template directory shipped in the charm with this helper file.
-
- For the example above, '/tmp/templates' contains the following structure::
-
- /tmp/templates/nova.conf
- /tmp/templates/api-paste.ini
- /tmp/templates/grizzly/api-paste.ini
- /tmp/templates/havana/api-paste.ini
-
- Since it was registered with the grizzly release, it first seraches
- the grizzly directory for nova.conf, then the templates dir.
-
- When writing api-paste.ini, it will find the template in the grizzly
- directory.
-
- If the object were created with folsom, it would fall back to the
- base templates dir for its api-paste.ini template.
-
- This system should help manage changes in config files through
- openstack releases, allowing charms to fall back to the most recently
- updated config template for a given release
-
- The haproxy.conf, since it is not shipped in the templates dir, will
- be loaded from the module directory's template directory, eg
- $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
- us to ship common templates (haproxy, apache) with the helpers.
-
- **Context generators**
-
- Context generators are used to generate template contexts during hook
- execution. Doing so may require inspecting service relations, charm
- config, etc. When registered, a config file is associated with a list
- of generators. When a template is rendered and written, all context
- generates are called in a chain to generate the context dictionary
- passed to the jinja2 template. See context.py for more info.
- """
- def __init__(self, templates_dir, openstack_release):
- if not os.path.isdir(templates_dir):
- log('Could not locate templates dir %s' % templates_dir,
- level=ERROR)
- raise OSConfigException
-
- self.templates_dir = templates_dir
- self.openstack_release = openstack_release
- self.templates = {}
- self._tmpl_env = None
-
- if None in [Environment, ChoiceLoader, FileSystemLoader]:
- # if this code is running, the object is created pre-install hook.
- # jinja2 shouldn't get touched until the module is reloaded on next
- # hook execution, with proper jinja2 bits successfully imported.
- apt_install('python-jinja2')
-
- def register(self, config_file, contexts):
- """
- Register a config file with a list of context generators to be called
- during rendering.
- """
- self.templates[config_file] = OSConfigTemplate(config_file=config_file,
- contexts=contexts)
- log('Registered config file: %s' % config_file, level=INFO)
-
- def _get_tmpl_env(self):
- if not self._tmpl_env:
- loader = get_loader(self.templates_dir, self.openstack_release)
- self._tmpl_env = Environment(loader=loader)
-
- def _get_template(self, template):
- self._get_tmpl_env()
- template = self._tmpl_env.get_template(template)
- log('Loaded template from %s' % template.filename, level=INFO)
- return template
-
- def render(self, config_file):
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
- ctxt = self.templates[config_file].context()
-
- _tmpl = os.path.basename(config_file)
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound:
- # if no template is found with basename, try looking for it
- # using a munged full path, eg:
- # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
- _tmpl = '_'.join(config_file.split('/')[1:])
- try:
- template = self._get_template(_tmpl)
- except exceptions.TemplateNotFound as e:
- log('Could not load template from %s by %s or %s.' %
- (self.templates_dir, os.path.basename(config_file), _tmpl),
- level=ERROR)
- raise e
-
- log('Rendering from template: %s' % _tmpl, level=INFO)
- return template.render(ctxt)
-
- def write(self, config_file):
- """
- Write a single config file, raises if config file is not registered.
- """
- if config_file not in self.templates:
- log('Config not registered: %s' % config_file, level=ERROR)
- raise OSConfigException
-
- _out = self.render(config_file)
-
- with open(config_file, 'wb') as out:
- out.write(_out)
-
- log('Wrote template %s.' % config_file, level=INFO)
-
- def write_all(self):
- """
- Write out all registered config files.
- """
- [self.write(k) for k in six.iterkeys(self.templates)]
-
- def set_release(self, openstack_release):
- """
- Resets the template environment and generates a new template loader
- based on a the new openstack release.
- """
- self._tmpl_env = None
- self.openstack_release = openstack_release
- self._get_tmpl_env()
-
- def complete_contexts(self):
- '''
- Returns a list of context interfaces that yield a complete context.
- '''
- interfaces = []
- [interfaces.extend(i.complete_contexts())
- for i in six.itervalues(self.templates)]
- return interfaces
-
- def get_incomplete_context_data(self, interfaces):
- '''
- Return dictionary of relation status of interfaces and any missing
- required context data. Example:
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}}
- '''
- incomplete_context_data = {}
-
- for i in six.itervalues(self.templates):
- for context in i.contexts:
- for interface in interfaces:
- related = False
- if interface in context.interfaces:
- related = context.get_related()
- missing_data = context.missing_data
- if missing_data:
- incomplete_context_data[interface] = {'missing_data': missing_data}
- if related:
- if incomplete_context_data.get(interface):
- incomplete_context_data[interface].update({'related': True})
- else:
- incomplete_context_data[interface] = {'related': True}
- else:
- incomplete_context_data[interface] = {'related': False}
- return incomplete_context_data
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/utils.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/utils.py
deleted file mode 100644
index 24b998d..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/openstack/utils.py
+++ /dev/null
@@ -1,926 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
-from functools import wraps
-
-import subprocess
-import json
-import os
-import sys
-import re
-
-import six
-import yaml
-
-from charmhelpers.contrib.network import ip
-
-from charmhelpers.core import (
- unitdata,
-)
-
-from charmhelpers.core.hookenv import (
- config,
- log as juju_log,
- charm_dir,
- INFO,
- relation_ids,
- relation_set,
- status_set,
- hook_name
-)
-
-from charmhelpers.contrib.storage.linux.lvm import (
- deactivate_lvm_volume_group,
- is_lvm_physical_volume,
- remove_lvm_physical_volume,
-)
-
-from charmhelpers.contrib.network.ip import (
- get_ipv6_addr
-)
-
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
-from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install, apt_cache, install_remote
-from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
-from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
-
-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
-
-DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
- 'restricted main multiverse universe')
-
-UBUNTU_OPENSTACK_RELEASE = OrderedDict([
- ('oneiric', 'diablo'),
- ('precise', 'essex'),
- ('quantal', 'folsom'),
- ('raring', 'grizzly'),
- ('saucy', 'havana'),
- ('trusty', 'icehouse'),
- ('utopic', 'juno'),
- ('vivid', 'kilo'),
- ('wily', 'liberty'),
-])
-
-
-OPENSTACK_CODENAMES = OrderedDict([
- ('2011.2', 'diablo'),
- ('2012.1', 'essex'),
- ('2012.2', 'folsom'),
- ('2013.1', 'grizzly'),
- ('2013.2', 'havana'),
- ('2014.1', 'icehouse'),
- ('2014.2', 'juno'),
- ('2015.1', 'kilo'),
- ('2015.2', 'liberty'),
-])
-
-# The ugly duckling
-SWIFT_CODENAMES = OrderedDict([
- ('1.4.3', 'diablo'),
- ('1.4.8', 'essex'),
- ('1.7.4', 'folsom'),
- ('1.8.0', 'grizzly'),
- ('1.7.7', 'grizzly'),
- ('1.7.6', 'grizzly'),
- ('1.10.0', 'havana'),
- ('1.9.1', 'havana'),
- ('1.9.0', 'havana'),
- ('1.13.1', 'icehouse'),
- ('1.13.0', 'icehouse'),
- ('1.12.0', 'icehouse'),
- ('1.11.0', 'icehouse'),
- ('2.0.0', 'juno'),
- ('2.1.0', 'juno'),
- ('2.2.0', 'juno'),
- ('2.2.1', 'kilo'),
- ('2.2.2', 'kilo'),
- ('2.3.0', 'liberty'),
- ('2.4.0', 'liberty'),
-])
-
-# >= Liberty version->codename mapping
-PACKAGE_CODENAMES = {
- 'nova-common': OrderedDict([
- ('12.0.0', 'liberty'),
- ]),
- 'neutron-common': OrderedDict([
- ('7.0.0', 'liberty'),
- ]),
- 'cinder-common': OrderedDict([
- ('7.0.0', 'liberty'),
- ]),
- 'keystone': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
- 'horizon-common': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
- 'ceilometer-common': OrderedDict([
- ('5.0.0', 'liberty'),
- ]),
- 'heat-common': OrderedDict([
- ('5.0.0', 'liberty'),
- ]),
- 'glance-common': OrderedDict([
- ('11.0.0', 'liberty'),
- ]),
- 'openstack-dashboard': OrderedDict([
- ('8.0.0', 'liberty'),
- ]),
-}
-
-DEFAULT_LOOPBACK_SIZE = '5G'
-
-
-def error_out(msg):
- juju_log("FATAL ERROR: %s" % msg, level='ERROR')
- sys.exit(1)
-
-
-def get_os_codename_install_source(src):
- '''Derive OpenStack release codename from a given installation source.'''
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = ''
- if src is None:
- return rel
- if src in ['distro', 'distro-proposed']:
- try:
- rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
- except KeyError:
- e = 'Could not derive openstack release for '\
- 'this Ubuntu release: %s' % ubuntu_rel
- error_out(e)
- return rel
-
- if src.startswith('cloud:'):
- ca_rel = src.split(':')[1]
- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
- return ca_rel
-
- # Best guess match based on deb string provided
- if src.startswith('deb') or src.startswith('ppa'):
- for k, v in six.iteritems(OPENSTACK_CODENAMES):
- if v in src:
- return v
-
-
-def get_os_version_install_source(src):
- codename = get_os_codename_install_source(src)
- return get_os_version_codename(codename)
-
-
-def get_os_codename_version(vers):
- '''Determine OpenStack codename from version number.'''
- try:
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
- '''Determine OpenStack version number from codename.'''
- for k, v in six.iteritems(version_map):
- if v == codename:
- return k
- e = 'Could not derive OpenStack version for '\
- 'codename: %s' % codename
- error_out(e)
-
-
-def get_os_codename_package(package, fatal=True):
- '''Derive OpenStack release codename from an installed package.'''
- import apt_pkg as apt
-
- cache = apt_cache()
-
- try:
- pkg = cache[package]
- except:
- if not fatal:
- return None
- # the package is unknown to the current apt cache.
- e = 'Could not determine version of package with no installation '\
- 'candidate: %s' % package
- error_out(e)
-
- if not pkg.current_ver:
- if not fatal:
- return None
- # package is known, but no version is currently installed.
- e = 'Could not determine version of uninstalled package: %s' % package
- error_out(e)
-
- vers = apt.upstream_version(pkg.current_ver.ver_str)
- match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
- if match:
- vers = match.group(0)
-
- # >= Liberty independent project versions
- if (package in PACKAGE_CODENAMES and
- vers in PACKAGE_CODENAMES[package]):
- return PACKAGE_CODENAMES[package][vers]
- else:
- # < Liberty co-ordinated project versions
- try:
- if 'swift' in pkg.name:
- swift_vers = vers[:5]
- if swift_vers not in SWIFT_CODENAMES:
- # Deal with 1.10.0 upward
- swift_vers = vers[:6]
- return SWIFT_CODENAMES[swift_vers]
- else:
- vers = vers[:6]
- return OPENSTACK_CODENAMES[vers]
- except KeyError:
- if not fatal:
- return None
- e = 'Could not determine OpenStack codename for version %s' % vers
- error_out(e)
-
-
-def get_os_version_package(pkg, fatal=True):
- '''Derive OpenStack version number from an installed package.'''
- codename = get_os_codename_package(pkg, fatal=fatal)
-
- if not codename:
- return None
-
- if 'swift' in pkg:
- vers_map = SWIFT_CODENAMES
- else:
- vers_map = OPENSTACK_CODENAMES
-
- for version, cname in six.iteritems(vers_map):
- if cname == codename:
- return version
- # e = "Could not determine OpenStack version for package: %s" % pkg
- # error_out(e)
-
-
-os_rel = None
-
-
-def os_release(package, base='essex'):
- '''
- Returns OpenStack release codename from a cached global.
- If the codename can not be determined from either an installed package or
- the installation source, the earliest release supported by the charm should
- be returned.
- '''
- global os_rel
- if os_rel:
- return os_rel
- os_rel = (get_os_codename_package(package, fatal=False) or
- get_os_codename_install_source(config('openstack-origin')) or
- base)
- return os_rel
-
-
-def import_key(keyid):
- cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
- "--recv-keys %s" % keyid
- try:
- subprocess.check_call(cmd.split(' '))
- except subprocess.CalledProcessError:
- error_out("Error importing repo key %s" % keyid)
-
-
-def configure_installation_source(rel):
- '''Configure apt installation source.'''
- if rel == 'distro':
- return
- elif rel == 'distro-proposed':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(DISTRO_PROPOSED % ubuntu_rel)
- elif rel[:4] == "ppa:":
- src = rel
- subprocess.check_call(["add-apt-repository", "-y", src])
- elif rel[:3] == "deb":
- l = len(rel.split('|'))
- if l == 2:
- src, key = rel.split('|')
- juju_log("Importing PPA key from keyserver for %s" % src)
- import_key(key)
- elif l == 1:
- src = rel
- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
- f.write(src)
- elif rel[:6] == 'cloud:':
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
- rel = rel.split(':')[1]
- u_rel = rel.split('-')[0]
- ca_rel = rel.split('-')[1]
-
- if u_rel != ubuntu_rel:
- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
- 'version (%s)' % (ca_rel, ubuntu_rel)
- error_out(e)
-
- if 'staging' in ca_rel:
- # staging is just a regular PPA.
- os_rel = ca_rel.split('/')[0]
- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
- cmd = 'add-apt-repository -y %s' % ppa
- subprocess.check_call(cmd.split(' '))
- return
-
- # map charm config options to actual archive pockets.
- pockets = {
- 'folsom': 'precise-updates/folsom',
- 'folsom/updates': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'grizzly': 'precise-updates/grizzly',
- 'grizzly/updates': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'havana': 'precise-updates/havana',
- 'havana/updates': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'icehouse': 'precise-updates/icehouse',
- 'icehouse/updates': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'juno': 'trusty-updates/juno',
- 'juno/updates': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'kilo': 'trusty-updates/kilo',
- 'kilo/updates': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'liberty': 'trusty-updates/liberty',
- 'liberty/updates': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'mitaka': 'trusty-updates/mitaka',
- 'mitaka/updates': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- }
-
- try:
- pocket = pockets[ca_rel]
- except KeyError:
- e = 'Invalid Cloud Archive release specified: %s' % rel
- error_out(e)
-
- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
- apt_install('ubuntu-cloud-keyring', fatal=True)
-
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
- f.write(src)
- else:
- error_out("Invalid openstack-release specified: %s" % rel)
-
-
-def config_value_changed(option):
- """
- Determine if config value changed since last call to this function.
- """
- hook_data = unitdata.HookData()
- with hook_data():
- db = unitdata.kv()
- current = config(option)
- saved = db.get(option)
- db.set(option, current)
- if saved is None:
- return False
- return current != saved
-
-
-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
- """
- Write an rc file in the charm-delivered directory containing
- exported environment variables provided by env_vars. Any charm scripts run
- outside the juju hook environment can source this scriptrc to obtain
- updated config information necessary to perform health checks or
- service changes.
- """
- juju_rc_path = "%s/%s" % (charm_dir(), script_path)
- if not os.path.exists(os.path.dirname(juju_rc_path)):
- os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
- rc_script.write(
- "#!/bin/bash\n")
- [rc_script.write('export %s=%s\n' % (u, p))
- for u, p in six.iteritems(env_vars) if u != "script_path"]
-
-
-def openstack_upgrade_available(package):
- """
- Determines if an OpenStack upgrade is available from installation
- source, based on version of installed package.
-
- :param package: str: Name of installed package.
-
- :returns: bool: : Returns True if configured installation source offers
- a newer version of package.
-
- """
-
- import apt_pkg as apt
- src = config('openstack-origin')
- cur_vers = get_os_version_package(package)
- if "swift" in package:
- codename = get_os_codename_install_source(src)
- available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
- else:
- available_vers = get_os_version_install_source(src)
- apt.init()
- return apt.version_compare(available_vers, cur_vers) == 1
-
-
-def ensure_block_device(block_device):
- '''
- Confirm block_device, create as loopback if necessary.
-
- :param block_device: str: Full path of block device to ensure.
-
- :returns: str: Full path of ensured block device.
- '''
- _none = ['None', 'none', None]
- if (block_device in _none):
- error_out('prepare_storage(): Missing required input: block_device=%s.'
- % block_device)
-
- if block_device.startswith('/dev/'):
- bdev = block_device
- elif block_device.startswith('/'):
- _bd = block_device.split('|')
- if len(_bd) == 2:
- bdev, size = _bd
- else:
- bdev = block_device
- size = DEFAULT_LOOPBACK_SIZE
- bdev = ensure_loopback_device(bdev, size)
- else:
- bdev = '/dev/%s' % block_device
-
- if not is_block_device(bdev):
- error_out('Failed to locate valid block device at %s' % bdev)
-
- return bdev
-
-
-def clean_storage(block_device):
- '''
- Ensures a block device is clean. That is:
- - unmounted
- - any lvm volume groups are deactivated
- - any lvm physical device signatures removed
- - partition table wiped
-
- :param block_device: str: Full path to block device to clean.
- '''
- for mp, d in mounts():
- if d == block_device:
- juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
- (d, mp), level=INFO)
- umount(mp, persist=True)
-
- if is_lvm_physical_volume(block_device):
- deactivate_lvm_volume_group(block_device)
- remove_lvm_physical_volume(block_device)
- else:
- zap_disk(block_device)
-
-is_ip = ip.is_ip
-ns_query = ip.ns_query
-get_host_ip = ip.get_host_ip
-get_hostname = ip.get_hostname
-
-
-def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
- mm_map = {}
- if os.path.isfile(mm_file):
- with open(mm_file, 'r') as f:
- mm_map = json.load(f)
- return mm_map
-
-
-def sync_db_with_multi_ipv6_addresses(database, database_user,
- relation_prefix=None):
- hosts = get_ipv6_addr(dynamic_only=False)
-
- kwargs = {'database': database,
- 'username': database_user,
- 'hostname': json.dumps(hosts)}
-
- if relation_prefix:
- for key in list(kwargs.keys()):
- kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
- del kwargs[key]
-
- for rid in relation_ids('shared-db'):
- relation_set(relation_id=rid, **kwargs)
-
-
-def os_requires_version(ostack_release, pkg):
- """
- Decorator for hook to specify minimum supported release
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args):
- if os_release(pkg) < ostack_release:
- raise Exception("This hook is not supported on releases"
- " before %s" % ostack_release)
- f(*args)
- return wrapped_f
- return wrap
-
-
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-requirements_dir = None
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-def git_clone_and_install(projects_yaml, core_project, depth=1):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements):
- """
- Clone and install a single git repository.
- """
- dest_dir = os.path.join(parent_dir, os.path.basename(repo))
-
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- if not os.path.exists(dest_dir):
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
- depth=depth)
- else:
- repo_dir = dest_dir
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv)
- else:
- pip_install(repo_dir, venv=venv)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Decorator to set workload status based on complete contexts
- """
- def wrap(f):
- @wraps(f)
- def wrapped_f(*args, **kwargs):
- # Run the original function first
- f(*args, **kwargs)
- # Set workload status now that contexts have been
- # acted on
- set_os_workload_status(configs, required_interfaces, charm_func)
- return wrapped_f
- return wrap
-
-
-def set_os_workload_status(configs, required_interfaces, charm_func=None):
- """
- Set workload status based on complete contexts.
- status-set missing or incomplete contexts
- and juju-log details of missing required data.
- charm_func is a charm specific function to run checking
- for charm specific requirements such as a VIP setting.
- """
- incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
- state = 'active'
- missing_relations = []
- incomplete_relations = []
- message = None
- charm_state = None
- charm_message = None
-
- for generic_interface in incomplete_rel_data.keys():
- related_interface = None
- missing_data = {}
- # Related or not?
- for interface in incomplete_rel_data[generic_interface]:
- if incomplete_rel_data[generic_interface][interface].get('related'):
- related_interface = interface
- missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
- # No relation ID for the generic_interface
- if not related_interface:
- juju_log("{} relation is missing and must be related for "
- "functionality. ".format(generic_interface), 'WARN')
- state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
- else:
- # Relation ID exists but no related unit
- if not missing_data:
- # Edge case relation ID exists but departing
- if ('departed' in hook_name() or 'broken' in hook_name()) \
- and related_interface in hook_name():
- state = 'blocked'
- if generic_interface not in missing_relations:
- missing_relations.append(generic_interface)
- juju_log("{} relation's interface, {}, "
- "relationship is departed or broken "
- "and is required for functionality."
- "".format(generic_interface, related_interface), "WARN")
- # Normal case relation ID exists but no related unit
- # (joining)
- else:
- juju_log("{} relations's interface, {}, is related but has "
- "no units in the relation."
- "".format(generic_interface, related_interface), "INFO")
- # Related unit exists and data missing on the relation
- else:
- juju_log("{} relation's interface, {}, is related awaiting "
- "the following data from the relationship: {}. "
- "".format(generic_interface, related_interface,
- ", ".join(missing_data)), "INFO")
- if state != 'blocked':
- state = 'waiting'
- if generic_interface not in incomplete_relations \
- and generic_interface not in missing_relations:
- incomplete_relations.append(generic_interface)
-
- if missing_relations:
- message = "Missing relations: {}".format(", ".join(missing_relations))
- if incomplete_relations:
- message += "; incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'blocked'
- elif incomplete_relations:
- message = "Incomplete relations: {}" \
- "".format(", ".join(incomplete_relations))
- state = 'waiting'
-
- # Run charm specific checks
- if charm_func:
- charm_state, charm_message = charm_func(configs)
- if charm_state != 'active' and charm_state != 'unknown':
- state = workload_state_compare(state, charm_state)
- if message:
- message = "{} {}".format(message, charm_message)
- else:
- message = charm_message
-
- # Set to active if all requirements have been met
- if state == 'active':
- message = "Unit is ready"
- juju_log(message, "INFO")
-
- status_set(state, message)
-
-
-def workload_state_compare(current_workload_state, workload_state):
- """ Return highest priority of two states"""
- hierarchy = {'unknown': -1,
- 'active': 0,
- 'maintenance': 1,
- 'waiting': 2,
- 'blocked': 3,
- }
-
- if hierarchy.get(workload_state) is None:
- workload_state = 'unknown'
- if hierarchy.get(current_workload_state) is None:
- current_workload_state = 'unknown'
-
- # Set workload_state based on hierarchy of statuses
- if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
- return current_workload_state
- else:
- return workload_state
-
-
-def incomplete_relation_data(configs, required_interfaces):
- """
- Check complete contexts against required_interfaces
- Return dictionary of incomplete relation data.
-
- configs is an OSConfigRenderer object with configs registered
-
- required_interfaces is a dictionary of required general interfaces
- with dictionary values of possible specific interfaces.
- Example:
- required_interfaces = {'database': ['shared-db', 'pgsql-db']}
-
- The interface is said to be satisfied if anyone of the interfaces in the
- list has a complete context.
-
- Return dictionary of incomplete or missing required contexts with relation
- status of interfaces and any missing data points. Example:
- {'message':
- {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
- 'zeromq-configuration': {'related': False}},
- 'identity':
- {'identity-service': {'related': False}},
- 'database':
- {'pgsql-db': {'related': False},
- 'shared-db': {'related': True}}}
- """
- complete_ctxts = configs.complete_contexts()
- incomplete_relations = []
- for svc_type in required_interfaces.keys():
- # Avoid duplicates
- found_ctxt = False
- for interface in required_interfaces[svc_type]:
- if interface in complete_ctxts:
- found_ctxt = True
- if not found_ctxt:
- incomplete_relations.append(svc_type)
- incomplete_context_data = {}
- for i in incomplete_relations:
- incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
- return incomplete_context_data
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/debug.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/debug.py
deleted file mode 100644
index 871cd6f..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/debug.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import print_function
-
-import atexit
-import sys
-
-from charmhelpers.contrib.python.rpdb import Rpdb
-from charmhelpers.core.hookenv import (
- open_port,
- close_port,
- ERROR,
- log
-)
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-DEFAULT_ADDR = "0.0.0.0"
-DEFAULT_PORT = 4444
-
-
-def _error(message):
- log(message, level=ERROR)
-
-
-def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
- """
- Set a trace point using the remote debugger
- """
- atexit.register(close_port, port)
- try:
- log("Starting a remote python debugger session on %s:%s" % (addr,
- port))
- open_port(port)
- debugger = Rpdb(addr=addr, port=port)
- debugger.set_trace(sys._getframe().f_back)
- except:
- _error("Cannot start a remote debug session on %s:%s" % (addr,
- port))
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/packages.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index 10b32e3..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-try:
- from pip import main as pip_execute
-except ImportError:
- apt_update()
- apt_install('python-pip')
- from pip import main as pip_execute
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, **options):
- """Install a requirements file """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- apt_install('python-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/rpdb.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/rpdb.py
deleted file mode 100644
index d503f88..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/rpdb.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Remote Python Debugger (pdb wrapper)."""
-
-import pdb
-import socket
-import sys
-
-__author__ = "Bertrand Janin <b@janin.com>"
-__version__ = "0.1.3"
-
-
-class Rpdb(pdb.Pdb):
-
- def __init__(self, addr="127.0.0.1", port=4444):
- """Initialize the socket and initialize pdb."""
-
- # Backup stdin and stdout before replacing them by the socket handle
- self.old_stdout = sys.stdout
- self.old_stdin = sys.stdin
-
- # Open a 'reusable' socket to let the webapp reload on the same port
- self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
- self.skt.bind((addr, port))
- self.skt.listen(1)
- (clientsocket, address) = self.skt.accept()
- handle = clientsocket.makefile('rw')
- pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
- sys.stdout = sys.stdin = handle
-
- def shutdown(self):
- """Revert stdin and stdout, close the socket."""
- sys.stdout = self.old_stdout
- sys.stdin = self.old_stdin
- self.skt.close()
- self.set_continue()
-
- def do_continue(self, arg):
- """Stop all operation on ``continue``."""
- self.shutdown()
- return 1
-
- do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/version.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/version.py
deleted file mode 100644
index c39fcbf..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/python/version.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def current_version():
- """Current system python version"""
- return sys.version_info
-
-
-def current_version_string():
- """Current system python version as string major.minor.micro"""
- return "{0}.{1}.{2}".format(sys.version_info.major,
- sys.version_info.minor,
- sys.version_info.micro)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/ceph.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/ceph.py
deleted file mode 100644
index 83f264d..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ /dev/null
@@ -1,657 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2012 Canonical Ltd.
-#
-# This file is sourced from lp:openstack-charm-helpers
-#
-# Authors:
-# James Page <james.page@ubuntu.com>
-# Adam Gandelman <adamg@ubuntu.com>
-#
-
-import os
-import shutil
-import json
-import time
-import uuid
-
-from subprocess import (
- check_call,
- check_output,
- CalledProcessError,
-)
-from charmhelpers.core.hookenv import (
- local_unit,
- relation_get,
- relation_ids,
- relation_set,
- related_units,
- log,
- DEBUG,
- INFO,
- WARNING,
- ERROR,
-)
-from charmhelpers.core.host import (
- mount,
- mounts,
- service_start,
- service_stop,
- service_running,
- umount,
-)
-from charmhelpers.fetch import (
- apt_install,
-)
-
-from charmhelpers.core.kernel import modprobe
-
-KEYRING = '/etc/ceph/ceph.client.{}.keyring'
-KEYFILE = '/etc/ceph/ceph.client.{}.key'
-
-CEPH_CONF = """[global]
-auth supported = {auth}
-keyring = {keyring}
-mon host = {mon_hosts}
-log to syslog = {use_syslog}
-err to syslog = {use_syslog}
-clog to syslog = {use_syslog}
-"""
-
-
-def install():
- """Basic Ceph client installation."""
- ceph_dir = "/etc/ceph"
- if not os.path.exists(ceph_dir):
- os.mkdir(ceph_dir)
-
- apt_install('ceph-common', fatal=True)
-
-
-def rbd_exists(service, pool, rbd_img):
- """Check to see if a RADOS block device exists."""
- try:
- out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return rbd_img in out
-
-
-def create_rbd_image(service, pool, image, sizemb):
- """Create a new RADOS block device."""
- cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
- '--pool', pool]
- check_call(cmd)
-
-
-def pool_exists(service, name):
- """Check to see if a RADOS pool already exists."""
- try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def get_osds(service):
- """Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
- """
- version = ceph_version()
- if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
-
- return None
-
-
-def create_pool(service, name, replicas=3):
- """Create a new RADOS pool."""
- if pool_exists(service, name):
- log("Ceph pool {} already exists, skipping creation".format(name),
- level=WARNING)
- return
-
- # Calculate the number of placement groups based
- # on upstream recommended best practices.
- osds = get_osds(service)
- if osds:
- pgnum = (len(osds) * 100 // replicas)
- else:
- # NOTE(james-page): Default to 200 for older ceph versions
- # which don't support OSD query from cli
- pgnum = 200
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
- check_call(cmd)
-
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
- str(replicas)]
- check_call(cmd)
-
-
-def delete_pool(service, name):
- """Delete a RADOS pool from ceph."""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
- '--yes-i-really-really-mean-it']
- check_call(cmd)
-
-
-def _keyfile_path(service):
- return KEYFILE.format(service)
-
-
-def _keyring_path(service):
- return KEYRING.format(service)
-
-
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
- keyring = _keyring_path(service)
- if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
-
- cmd = ['ceph-authtool', keyring, '--create-keyring',
- '--name=client.{}'.format(service), '--add-key={}'.format(key)]
- check_call(cmd)
- log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
-
-
-def delete_keyring(service):
- """Delete an existing Ceph keyring."""
- keyring = _keyring_path(service)
- if not os.path.exists(keyring):
- log('Keyring does not exist at %s' % keyring, level=WARNING)
- return
-
- os.remove(keyring)
- log('Deleted ring at %s.' % keyring, level=INFO)
-
-
-def create_key_file(service, key):
- """Create a file containing key."""
- keyfile = _keyfile_path(service)
- if os.path.exists(keyfile):
- log('Keyfile exists at %s.' % keyfile, level=WARNING)
- return
-
- with open(keyfile, 'w') as fd:
- fd.write(key)
-
- log('Created new keyfile at %s.' % keyfile, level=INFO)
-
-
-def get_ceph_nodes():
- """Query named relation 'ceph' to determine current nodes."""
- hosts = []
- for r_id in relation_ids('ceph'):
- for unit in related_units(r_id):
- hosts.append(relation_get('private-address', unit=unit, rid=r_id))
-
- return hosts
-
-
-def configure(service, key, auth, use_syslog):
- """Perform basic configuration of Ceph."""
- create_keyring(service, key)
- create_key_file(service, key)
- hosts = get_ceph_nodes()
- with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
- ceph_conf.write(CEPH_CONF.format(auth=auth,
- keyring=_keyring_path(service),
- mon_hosts=",".join(map(str, hosts)),
- use_syslog=use_syslog))
- modprobe('rbd')
-
-
-def image_mapped(name):
- """Determine whether a RADOS block device is mapped locally."""
- try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
- except CalledProcessError:
- return False
-
- return name in out
-
-
-def map_block_storage(service, pool, image):
- """Map a RADOS block device for local use."""
- cmd = [
- 'rbd',
- 'map',
- '{}/{}'.format(pool, image),
- '--user',
- service,
- '--secret',
- _keyfile_path(service),
- ]
- check_call(cmd)
-
-
-def filesystem_mounted(fs):
- """Determine whether a filesytems is already mounted."""
- return fs in [f for f, m in mounts()]
-
-
-def make_filesystem(blk_device, fstype='ext4', timeout=10):
- """Make a new filesystem on the specified block device."""
- count = 0
- e_noent = os.errno.ENOENT
- while not os.path.exists(blk_device):
- if count >= timeout:
- log('Gave up waiting on block device %s' % blk_device,
- level=ERROR)
- raise IOError(e_noent, os.strerror(e_noent), blk_device)
-
- log('Waiting for block device %s to appear' % blk_device,
- level=DEBUG)
- count += 1
- time.sleep(1)
- else:
- log('Formatting block device %s as filesystem %s.' %
- (blk_device, fstype), level=INFO)
- check_call(['mkfs', '-t', fstype, blk_device])
-
-
-def place_data_on_block_device(blk_device, data_src_dst):
- """Migrate data in data_src_dst to blk_device and then remount."""
- # mount block device into /mnt
- mount(blk_device, '/mnt')
- # copy data to /mnt
- copy_files(data_src_dst, '/mnt')
- # umount block device
- umount('/mnt')
- # Grab user/group ID's from original source
- _dir = os.stat(data_src_dst)
- uid = _dir.st_uid
- gid = _dir.st_gid
- # re-mount where the data should originally be
- # TODO: persist is currently a NO-OP in core.host
- mount(blk_device, data_src_dst, persist=True)
- # ensure original ownership of new mount.
- os.chown(data_src_dst, uid, gid)
-
-
-def copy_files(src, dst, symlinks=False, ignore=None):
- """Copy files from src to dst."""
- for item in os.listdir(src):
- s = os.path.join(src, item)
- d = os.path.join(dst, item)
- if os.path.isdir(s):
- shutil.copytree(s, d, symlinks, ignore)
- else:
- shutil.copy2(s, d)
-
-
-def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
- blk_device, fstype, system_services=[],
- replicas=3):
- """NOTE: This function must only be called from a single service unit for
- the same rbd_img otherwise data loss will occur.
-
- Ensures given pool and RBD image exists, is mapped to a block device,
- and the device is formatted and mounted at the given mount_point.
-
- If formatting a device for the first time, data existing at mount_point
- will be migrated to the RBD device before being re-mounted.
-
- All services listed in system_services will be stopped prior to data
- migration and restarted when complete.
- """
- # Ensure pool, RBD image, RBD mappings are in place.
- if not pool_exists(service, pool):
- log('Creating new pool {}.'.format(pool), level=INFO)
- create_pool(service, pool, replicas=replicas)
-
- if not rbd_exists(service, pool, rbd_img):
- log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
- create_rbd_image(service, pool, rbd_img, sizemb)
-
- if not image_mapped(rbd_img):
- log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
- level=INFO)
- map_block_storage(service, pool, rbd_img)
-
- # make file system
- # TODO: What happens if for whatever reason this is run again and
- # the data is already in the rbd device and/or is mounted??
- # When it is mounted already, it will fail to make the fs
- # XXX: This is really sketchy! Need to at least add an fstab entry
- # otherwise this hook will blow away existing data if its executed
- # after a reboot.
- if not filesystem_mounted(mount_point):
- make_filesystem(blk_device, fstype)
-
- for svc in system_services:
- if service_running(svc):
- log('Stopping services {} prior to migrating data.'
- .format(svc), level=DEBUG)
- service_stop(svc)
-
- place_data_on_block_device(blk_device, mount_point)
-
- for svc in system_services:
- log('Starting service {} after migrating data.'
- .format(svc), level=DEBUG)
- service_start(svc)
-
-
-def ensure_ceph_keyring(service, user=None, group=None):
- """Ensures a ceph keyring is created for a named service and optionally
- ensures user and group ownership.
-
- Returns False if no ceph key is available in relation state.
- """
- key = None
- for rid in relation_ids('ceph'):
- for unit in related_units(rid):
- key = relation_get('key', rid=rid, unit=unit)
- if key:
- break
-
- if not key:
- return False
-
- create_keyring(service=service, key=key)
- keyring = _keyring_path(service)
- if user and group:
- check_call(['chown', '%s.%s' % (user, group), keyring])
-
- return True
-
-
-def ceph_version():
- """Retrieve the local version of ceph."""
- if os.path.exists('/usr/bin/ceph'):
- cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
- output = output.split()
- if len(output) > 3:
- return output[2]
- else:
- return None
- else:
- return None
-
-
-class CephBrokerRq(object):
- """Ceph broker request.
-
- Multiple operations can be added to a request and sent to the Ceph broker
- to be executed.
-
- Request is json-encoded for sending over the wire.
-
- The API is versioned and defaults to version 1.
- """
- def __init__(self, api_version=1, request_id=None):
- self.api_version = api_version
- if request_id:
- self.request_id = request_id
- else:
- self.request_id = str(uuid.uuid1())
- self.ops = []
-
- def add_op_create_pool(self, name, replica_count=3):
- self.ops.append({'op': 'create-pool', 'name': name,
- 'replicas': replica_count})
-
- def set_ops(self, ops):
- """Set request ops to provided value.
-
- Useful for injecting ops that come from a previous request
- to allow comparisons to ensure validity.
- """
- self.ops = ops
-
- @property
- def request(self):
- return json.dumps({'api-version': self.api_version, 'ops': self.ops,
- 'request-id': self.request_id})
-
- def _ops_equal(self, other):
- if len(self.ops) == len(other.ops):
- for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op']:
- if self.ops[req_no][key] != other.ops[req_no][key]:
- return False
- else:
- return False
- return True
-
- def __eq__(self, other):
- if not isinstance(other, self.__class__):
- return False
- if self.api_version == other.api_version and \
- self._ops_equal(other):
- return True
- else:
- return False
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
-
-class CephBrokerRsp(object):
- """Ceph broker response.
-
- Response is json-decoded and contents provided as methods/properties.
-
- The API is versioned and defaults to version 1.
- """
-
- def __init__(self, encoded_rsp):
- self.api_version = None
- self.rsp = json.loads(encoded_rsp)
-
- @property
- def request_id(self):
- return self.rsp.get('request-id')
-
- @property
- def exit_code(self):
- return self.rsp.get('exit-code')
-
- @property
- def exit_msg(self):
- return self.rsp.get('stderr')
-
-
-# Ceph Broker Conversation:
-# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
-# and send that request to ceph via the ceph relation. The CephBrokerRq has a
-# unique id so that the client can identity which CephBrokerRsp is associated
-# with the request. Ceph will also respond to each client unit individually
-# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
-# via key broker-rsp-glance-0
-#
-# To use this the charm can just do something like:
-#
-# from charmhelpers.contrib.storage.linux.ceph import (
-# send_request_if_needed,
-# is_request_complete,
-# CephBrokerRq,
-# )
-#
-# @hooks.hook('ceph-relation-changed')
-# def ceph_changed():
-# rq = CephBrokerRq()
-# rq.add_op_create_pool(name='poolname', replica_count=3)
-#
-# if is_request_complete(rq):
-# <Request complete actions>
-# else:
-# send_request_if_needed(get_ceph_request())
-#
-# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
-# of glance having sent a request to ceph which ceph has successfully processed
-# 'ceph:8': {
-# 'ceph/0': {
-# 'auth': 'cephx',
-# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
-# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
-# 'ceph-public-address': '10.5.44.103',
-# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
-# 'private-address': '10.5.44.103',
-# },
-# 'glance/0': {
-# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
-# '"ops": [{"replicas": 3, "name": "glance", '
-# '"op": "create-pool"}]}'),
-# 'private-address': '10.5.44.109',
-# },
-# }
-
-def get_previous_request(rid):
- """Return the last ceph broker request sent on a given relation
-
- @param rid: Relation id to query for request
- """
- request = None
- broker_req = relation_get(attribute='broker_req', rid=rid,
- unit=local_unit())
- if broker_req:
- request_data = json.loads(broker_req)
- request = CephBrokerRq(api_version=request_data['api-version'],
- request_id=request_data['request-id'])
- request.set_ops(request_data['ops'])
-
- return request
-
-
-def get_request_states(request):
- """Return a dict of requests per relation id with their corresponding
- completion state.
-
- This allows a charm, which has a request for ceph, to see whether there is
- an equivalent request already being processed and if so what state that
- request is in.
-
- @param request: A CephBrokerRq object
- """
- complete = []
- requests = {}
- for rid in relation_ids('ceph'):
- complete = False
- previous_request = get_previous_request(rid)
- if request == previous_request:
- sent = True
- complete = is_request_complete_for_rid(previous_request, rid)
- else:
- sent = False
- complete = False
-
- requests[rid] = {
- 'sent': sent,
- 'complete': complete,
- }
-
- return requests
-
-
-def is_request_sent(request):
- """Check to see if a functionally equivalent request has already been sent
-
- Returns True if a similair request has been sent
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request)
- for rid in states.keys():
- if not states[rid]['sent']:
- return False
-
- return True
-
-
-def is_request_complete(request):
- """Check to see if a functionally equivalent request has already been
- completed
-
- Returns True if a similair request has been completed
-
- @param request: A CephBrokerRq object
- """
- states = get_request_states(request)
- for rid in states.keys():
- if not states[rid]['complete']:
- return False
-
- return True
-
-
-def is_request_complete_for_rid(request, rid):
- """Check if a given request has been completed on the given relation
-
- @param request: A CephBrokerRq object
- @param rid: Relation ID
- """
- broker_key = get_broker_rsp_key()
- for unit in related_units(rid):
- rdata = relation_get(rid=rid, unit=unit)
- if rdata.get(broker_key):
- rsp = CephBrokerRsp(rdata.get(broker_key))
- if rsp.request_id == request.request_id:
- if not rsp.exit_code:
- return True
- else:
- # The remote unit sent no reply targeted at this unit so either the
- # remote ceph cluster does not support unit targeted replies or it
- # has not processed our request yet.
- if rdata.get('broker_rsp'):
- request_data = json.loads(rdata['broker_rsp'])
- if request_data.get('request-id'):
- log('Ignoring legacy broker_rsp without unit key as remote '
- 'service supports unit specific replies', level=DEBUG)
- else:
- log('Using legacy broker_rsp as remote service does not '
- 'supports unit specific replies', level=DEBUG)
- rsp = CephBrokerRsp(rdata['broker_rsp'])
- if not rsp.exit_code:
- return True
-
- return False
-
-
-def get_broker_rsp_key():
- """Return broker response key for this unit
-
- This is the key that ceph is going to use to pass request status
- information back to this unit
- """
- return 'broker-rsp-' + local_unit().replace('/', '-')
-
-
-def send_request_if_needed(request):
- """Send broker request if an equivalent request has not already been sent
-
- @param request: A CephBrokerRq object
- """
- if is_request_sent(request):
- log('Request already sent but not complete, not sending new request',
- level=DEBUG)
- else:
- for rid in relation_ids('ceph'):
- log('Sending request {}'.format(request.request_id), level=DEBUG)
- relation_set(relation_id=rid, broker_req=request.request)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/loopback.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/loopback.py
deleted file mode 100644
index c296f09..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/loopback.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from subprocess import (
- check_call,
- check_output,
-)
-
-import six
-
-
-##################################################
-# loopback device helpers.
-##################################################
-def loopback_devices():
- '''
- Parse through 'losetup -a' output to determine currently mapped
- loopback devices. Output is expected to look like:
-
- /dev/loop0: [0807]:961814 (/tmp/my.img)
-
- :returns: dict: a dict mapping {loopback_dev: backing_file}
- '''
- loopbacks = {}
- cmd = ['losetup', '-a']
- devs = [d.strip().split(' ') for d in
- check_output(cmd).splitlines() if d != '']
- for dev, _, f in devs:
- loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
- return loopbacks
-
-
-def create_loopback(file_path):
- '''
- Create a loopback device for a given backing file.
-
- :returns: str: Full path to new loopback device (eg, /dev/loop0)
- '''
- file_path = os.path.abspath(file_path)
- check_call(['losetup', '--find', file_path])
- for d, f in six.iteritems(loopback_devices()):
- if f == file_path:
- return d
-
-
-def ensure_loopback_device(path, size):
- '''
- Ensure a loopback device exists for a given backing file path and size.
- If it a loopback device is not mapped to file, a new one will be created.
-
- TODO: Confirm size of found loopback device.
-
- :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
- '''
- for d, f in six.iteritems(loopback_devices()):
- if f == path:
- return d
-
- if not os.path.exists(path):
- cmd = ['truncate', '--size', size, path]
- check_call(cmd)
-
- return create_loopback(path)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/lvm.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/lvm.py
deleted file mode 100644
index 34b5f71..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output,
- Popen,
- PIPE,
-)
-
-
-##################################################
-# LVM helpers.
-##################################################
-def deactivate_lvm_volume_group(block_device):
- '''
- Deactivate any volume gruop associated with an LVM physical volume.
-
- :param block_device: str: Full path to LVM physical volume
- '''
- vg = list_lvm_volume_group(block_device)
- if vg:
- cmd = ['vgchange', '-an', vg]
- check_call(cmd)
-
-
-def is_lvm_physical_volume(block_device):
- '''
- Determine whether a block device is initialized as an LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: boolean: True if block device is a PV, False if not.
- '''
- try:
- check_output(['pvdisplay', block_device])
- return True
- except CalledProcessError:
- return False
-
-
-def remove_lvm_physical_volume(block_device):
- '''
- Remove LVM PV signatures from a given block device.
-
- :param block_device: str: Full path of block device to scrub.
- '''
- p = Popen(['pvremove', '-ff', block_device],
- stdin=PIPE)
- p.communicate(input='y\n')
-
-
-def list_lvm_volume_group(block_device):
- '''
- List LVM volume group associated with a given block device.
-
- Assumes block device is a valid LVM PV.
-
- :param block_device: str: Full path of block device to inspect.
-
- :returns: str: Name of volume group associated with block device or None
- '''
- vg = None
- pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
- return vg
-
-
-def create_lvm_physical_volume(block_device):
- '''
- Initialize a block device as an LVM physical volume.
-
- :param block_device: str: Full path of block device to initialize.
-
- '''
- check_call(['pvcreate', block_device])
-
-
-def create_lvm_volume_group(volume_group, block_device):
- '''
- Create an LVM volume group backed by a given block device.
-
- Assumes block device has already been initialized as an LVM PV.
-
- :param volume_group: str: Name of volume group to create.
- :block_device: str: Full path of PV-initialized block device.
- '''
- check_call(['vgcreate', volume_group, block_device])
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/utils.py b/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/utils.py
deleted file mode 100644
index 1e57941..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-from stat import S_ISBLK
-
-from subprocess import (
- check_call,
- check_output,
- call
-)
-
-
-def is_block_device(path):
- '''
- Confirm device at path is a valid block device node.
-
- :returns: boolean: True if path is a block device, False if not.
- '''
- if not os.path.exists(path):
- return False
- return S_ISBLK(os.stat(path).st_mode)
-
-
-def zap_disk(block_device):
- '''
- Clear a block device of partition table. Relies on sgdisk, which is
- installed as pat of the 'gdisk' package in Ubuntu.
-
- :param block_device: str: Full path of block device to clean.
- '''
- # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
- # sometimes sgdisk exits non-zero; this is OK, dd will clean up
- call(['sgdisk', '--zap-all', '--', block_device])
- call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
- dev_end = check_output(['blockdev', '--getsz',
- block_device]).decode('UTF-8')
- gpt_end = int(dev_end.split()[0]) - 100
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=1M', 'count=1'])
- check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
- 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
-
-
-def is_device_mounted(device):
- '''Given a device path, return True if that device is mounted, and False
- if it isn't.
-
- :param device: str: Full path of the device to check.
- :returns: boolean: True if the path represents a mounted device, False if
- it doesn't.
- '''
- is_partition = bool(re.search(r".*[0-9]+\b", device))
- out = check_output(['mount']).decode('UTF-8')
- if is_partition:
- return bool(re.search(device + r"\b", out))
- return bool(re.search(device + r"[0-9]*\b", out))
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/decorators.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/files.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/fstab.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/hookenv.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index ab53a78..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,898 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peer'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peer``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peer'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/host.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/host.py
deleted file mode 100644
index cb3c527..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/hugepage.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 4aaca3f..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/kernel.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/services/base.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3f67783..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/strutils.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/sysctl.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/templating.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/core/unitdata.py b/charms/trusty/contrail-control/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/contrail-control/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/contrail-control/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index efd7f9f..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/contrail-control/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/contrail-control/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f023b26..0000000
--- a/charms/trusty/contrail-control/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-control/hooks/config-changed b/charms/trusty/contrail-control/hooks/config-changed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-api-relation-broken b/charms/trusty/contrail-control/hooks/contrail-api-relation-broken
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-api-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-api-relation-changed b/charms/trusty/contrail-control/hooks/contrail-api-relation-changed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-api-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-api-relation-departed b/charms/trusty/contrail-control/hooks/contrail-api-relation-departed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-api-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-discovery-relation-broken b/charms/trusty/contrail-control/hooks/contrail-discovery-relation-broken
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-discovery-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-discovery-relation-changed b/charms/trusty/contrail-control/hooks/contrail-discovery-relation-changed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-discovery-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-discovery-relation-departed b/charms/trusty/contrail-control/hooks/contrail-discovery-relation-departed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-discovery-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-broken b/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-broken
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-changed b/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-changed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-departed b/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-departed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/contrail-ifmap-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/contrail_control_hooks.py b/charms/trusty/contrail-control/hooks/contrail_control_hooks.py
deleted file mode 100755
index 0c47ec2..0000000
--- a/charms/trusty/contrail-control/hooks/contrail_control_hooks.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-
-import json
-
-from charmhelpers.contrib.openstack.utils import configure_installation_source
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- local_unit,
- log,
- relation_get
-)
-
-from charmhelpers.core.host import (
- restart_on_change,
- service_restart
-)
-
-from charmhelpers.fetch import (
- apt_install,
- apt_upgrade,
- configure_sources
-)
-
-from contrail_control_utils import (
- fix_nodemgr,
- fix_permissions,
- provision_control,
- units,
- unprovision_control,
- write_control_config,
- write_nodemgr_config,
- write_vnc_api_config
-)
-
-PACKAGES = [ "contrail-control", "contrail-utils", "contrail-nodemgr" ]
-
-hooks = Hooks()
-config = config()
-
-def add_control():
- # check relation dependencies
- if not config_get("control-provisioned") \
- and config_get("contrail-api-ready") \
- and config_get("contrail-discovery-ready") \
- and config_get("contrail-ifmap-ready") \
- and config_get("identity-admin-ready"):
- provision_control()
- config["control-provisioned"] = True
-
-@hooks.hook("config-changed")
-def config_changed():
- pass
-
-def config_get(key):
- try:
- return config[key]
- except KeyError:
- return None
-
-@hooks.hook("contrail-api-relation-changed")
-def contrail_api_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- write_vnc_api_config()
- config["contrail-api-ready"] = True
- add_control()
-
-@hooks.hook("contrail-api-relation-departed")
-@hooks.hook("contrail-api-relation-broken")
-def contrail_api_departed():
- if not units("contrail-api"):
- remove_control()
- config["contrail-api-ready"] = False
- write_vnc_api_config()
-
-@hooks.hook("contrail-discovery-relation-changed")
-def contrail_discovery_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- contrail_discovery_relation()
- config["contrail-discovery-ready"] = True
- add_control()
-
-@hooks.hook("contrail-discovery-relation-departed")
-@hooks.hook("contrail-discovery-relation-broken")
-def contrail_discovery_departed():
- if not units("contrail-discovery"):
- remove_control()
- config["contrail-discovery-ready"] = False
- contrail_discovery_relation()
-
-@restart_on_change({"/etc/contrail/contrail-control.conf": ["contrail-control"],
- "/etc/contrail/control-node.conf": ["contrail-control"],
- "/etc/contrail/contrail-control-nodemgr.conf": ["contrail-control-nodemgr"]})
-def contrail_discovery_relation():
- write_control_config()
- write_nodemgr_config()
-
-@hooks.hook("contrail-ifmap-relation-changed")
-def contrail_ifmap_changed():
- creds = relation_get("creds")
- creds = json.loads(creds) if creds else {}
- if local_unit() not in creds:
- log("Relation not ready")
- return
- contrail_ifmap_relation()
- config["contrail-ifmap-ready"] = True
- add_control()
-
-@hooks.hook("contrail-ifmap-relation-departed")
-@hooks.hook("contrail-ifmap-relation-broken")
-def contrail_ifmap_departed():
- if not units("contrail-ifmap"):
- remove_control()
- config["contrail-ifmap-ready"] = False
- contrail_ifmap_relation()
-
-@restart_on_change({"/etc/contrail/contrail-control.conf": ["contrail-control"],
- "/etc/contrail/control-node.conf": ["contrail-control"]})
-def contrail_ifmap_relation():
- write_control_config()
-
-@hooks.hook("identity-admin-relation-changed")
-def identity_admin_changed():
- if not relation_get("service_hostname"):
- log("Relation not ready")
- return
- write_vnc_api_config()
- config["identity-admin-ready"] = True
- add_control()
-
-@hooks.hook("identity-admin-relation-departed")
-@hooks.hook("identity-admin-relation-broken")
-def identity_admin_departed():
- if not units("identity-admin"):
- remove_control()
- config["identity-admin-ready"] = False
- write_vnc_api_config()
-
-@hooks.hook()
-def install():
- configure_installation_source(config["openstack-origin"])
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- apt_install(PACKAGES, fatal=True)
- fix_permissions()
- fix_nodemgr()
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
-
-def remove_control():
- if config_get("control-provisioned"):
- unprovision_control()
- config["control-provisioned"] = False
-
-@hooks.hook("upgrade-charm")
-def upgrade_charm():
- write_control_config()
- write_nodemgr_config()
- service_restart("supervisor-control")
-
-if __name__ == "__main__":
- main()
diff --git a/charms/trusty/contrail-control/hooks/contrail_control_utils.py b/charms/trusty/contrail-control/hooks/contrail_control_utils.py
deleted file mode 100644
index fad2d11..0000000
--- a/charms/trusty/contrail-control/hooks/contrail_control_utils.py
+++ /dev/null
@@ -1,249 +0,0 @@
-import functools
-import os
-import pwd
-import shutil
-from socket import gethostbyname, gethostname
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output
-)
-from time import sleep, time
-
-import apt_pkg
-from apt_pkg import version_compare
-import json
-
-from charmhelpers.core.hookenv import (
- local_unit,
- log,
- related_units,
- relation_get,
- relation_ids,
- relation_type,
- remote_unit,
- unit_get
-)
-from charmhelpers.core.host import service_restart
-from charmhelpers.core.templating import render
-
-apt_pkg.init()
-
-def dpkg_version(pkg):
- try:
- return check_output(["dpkg-query", "-f", "${Version}\\n", "-W", pkg]).rstrip()
- except CalledProcessError:
- return None
-
-CONTRAIL_VERSION = dpkg_version("contrail-control")
-
-def retry(f=None, timeout=10, delay=2):
- """Retry decorator.
-
- Provides a decorator that can be used to retry a function if it raises
- an exception.
-
- :param timeout: timeout in seconds (default 10)
- :param delay: retry delay in seconds (default 2)
-
- Examples::
-
- # retry fetch_url function
- @retry
- def fetch_url():
- # fetch url
-
- # retry fetch_url function for 60 secs
- @retry(timeout=60)
- def fetch_url():
- # fetch url
- """
- if not f:
- return functools.partial(retry, timeout=timeout, delay=delay)
- @functools.wraps(f)
- def func(*args, **kwargs):
- start = time()
- error = None
- while True:
- try:
- return f(*args, **kwargs)
- except Exception as e:
- error = e
- elapsed = time() - start
- if elapsed >= timeout:
- raise error
- remaining = timeout - elapsed
- if delay <= remaining:
- sleep(delay)
- else:
- sleep(remaining)
- raise error
- return func
-
-def contrail_api_ctx():
- ctxs = [ { "api_server": gethostbyname(relation_get("private-address", unit, rid)),
- "api_port": port }
- for rid in relation_ids("contrail-api")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def contrail_ctx():
- return { "host_ip": gethostbyname(unit_get("private-address")) }
-
-def contrail_discovery_ctx():
- ctxs = [ { "discovery_server": vip if vip \
- else gethostbyname(relation_get("private-address", unit, rid)),
- "discovery_port": port }
- for rid in relation_ids("contrail-discovery")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def contrail_ifmap_ctx():
- ctxs = []
- unit = local_unit()
- for rid in relation_ids("contrail-ifmap"):
- for u in related_units(rid):
- creds = relation_get("creds", u, rid)
- if creds:
- creds = json.loads(creds)
- if unit in creds:
- cs = creds[unit]
- ctx = {}
- ctx["ifmap_user"] = cs["username"]
- ctx["ifmap_password"] = cs["password"]
- ctxs.append(ctx)
- return ctxs[0] if ctxs else {}
-
-@retry(timeout=300)
-def contrail_provision_control(hostname, ip, router_asn, api_ip, api_port, op,
- user, password, tenant):
- check_call(["contrail-provision-control",
- "--host_name", hostname,
- "--host_ip", ip,
- "--router_asn", str(router_asn),
- "--api_server_ip", api_ip,
- "--api_server_port", str(api_port),
- "--oper", op,
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant_name", tenant])
-
-def fix_nodemgr():
- # add files missing from contrail-nodemgr package
- shutil.copy("files/contrail-nodemgr-control.ini",
- "/etc/contrail/supervisord_control_files")
- pw = pwd.getpwnam("contrail")
- os.chown("/etc/contrail/supervisord_control_files/contrail-nodemgr-control.ini",
- pw.pw_uid, pw.pw_gid)
- shutil.copy("files/contrail-control-nodemgr", "/etc/init.d")
- os.chmod("/etc/init.d/contrail-control-nodemgr", 0755)
-
- # fake ntp status when inside a container
- if is_container():
- shutil.copy("files/ntpq-nodemgr", "/usr/local/bin/ntpq")
-
- service_restart("supervisor-control")
-
-def fix_permissions():
- os.chmod("/etc/contrail", 0755)
- os.chown("/etc/contrail", 0, 0)
-
-def identity_admin_ctx():
- ctxs = [ { "auth_host": gethostbyname(hostname),
- "auth_port": relation_get("service_port", unit, rid) }
- for rid in relation_ids("identity-admin")
- for unit, hostname in
- ((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
- if hostname ]
- return ctxs[0] if ctxs else {}
-
-def is_container():
- """Return boolean determining if inside container"""
- try:
- check_call(["running-in-container"])
- return True
- except CalledProcessError:
- return False
-
-def provision_control():
- hostname = gethostname()
- ip = gethostbyname(unit_get("private-address"))
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- port)
- for rid in relation_ids("contrail-api")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ][0]
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid)
- if relation_get("service_hostname", unit, rid) ][0]
- log("Provisioning control {}".format(ip))
- contrail_provision_control(hostname, ip, 64512, api_ip, api_port, "add",
- user, password, tenant)
-
-def units(relation):
- """Return a list of units for the specified relation"""
- return [ unit for rid in relation_ids(relation)
- for unit in related_units(rid) ]
-
-def unprovision_control():
- if not remote_unit():
- return
- hostname = gethostname()
- ip = gethostbyname(unit_get("private-address"))
- relation = relation_type()
- api_ip = None
- api_port = None
- if relation == "contrail-api":
- api_ip = gethostbyname(relation_get("private-address"))
- api_port = relation_get("port")
- else:
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- relation_get("port", unit, rid))
- for rid in relation_ids("contrail-api")
- for unit in related_units(rid) ][0]
- user = None
- password = None
- tenant = None
- if relation == "identity-admin":
- user = relation_get("service_username")
- password = relation_get("service_password")
- tenant = relation_get("service_tenant_name")
- else:
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Unprovisioning control {}".format(ip))
- contrail_provision_control(hostname, ip, 64512, api_ip, api_port, "del",
- user, password, tenant)
-
-def write_control_config():
- ctx = {}
- ctx.update(contrail_ctx())
- ctx.update(contrail_discovery_ctx())
- ctx.update(contrail_ifmap_ctx())
- target = "/etc/contrail/contrail-control.conf" \
- if version_compare(CONTRAIL_VERSION, "2.0") >= 0 \
- else "/etc/contrail/control-node.conf"
- render("control-node.conf", target, ctx, "root", "contrail", 0440)
-
-def write_nodemgr_config():
- ctx = contrail_discovery_ctx()
- render("contrail-control-nodemgr.conf",
- "/etc/contrail/contrail-control-nodemgr.conf", ctx)
-
-def write_vnc_api_config():
- ctx = {}
- ctx.update(contrail_api_ctx())
- ctx.update(identity_admin_ctx())
- render("vnc_api_lib.ini", "/etc/contrail/vnc_api_lib.ini", ctx)
diff --git a/charms/trusty/contrail-control/hooks/identity-admin-relation-broken b/charms/trusty/contrail-control/hooks/identity-admin-relation-broken
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/identity-admin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/identity-admin-relation-changed b/charms/trusty/contrail-control/hooks/identity-admin-relation-changed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/identity-admin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/identity-admin-relation-departed b/charms/trusty/contrail-control/hooks/identity-admin-relation-departed
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/identity-admin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/install b/charms/trusty/contrail-control/hooks/install
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/start b/charms/trusty/contrail-control/hooks/start
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/stop b/charms/trusty/contrail-control/hooks/stop
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/hooks/upgrade-charm b/charms/trusty/contrail-control/hooks/upgrade-charm
deleted file mode 120000
index d4237b7..0000000
--- a/charms/trusty/contrail-control/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-contrail_control_hooks.py \ No newline at end of file
diff --git a/charms/trusty/contrail-control/icon.svg b/charms/trusty/contrail-control/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/contrail-control/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/contrail-control/metadata.yaml b/charms/trusty/contrail-control/metadata.yaml
deleted file mode 100644
index fe57df1..0000000
--- a/charms/trusty/contrail-control/metadata.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: contrail-control
-summary: OpenContrail Control Node
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- OpenContrail is a network virtualization solution that provides an overlay
- virtual-network to virtual-machines, containers or network namespaces.
- .
- This charm provides the control node component.
-categories:
- - openstack
-provides:
- control-node:
- interface: contrail-control
-requires:
- contrail-api:
- interface: contrail-api
- contrail-discovery:
- interface: contrail-discovery
- contrail-ifmap:
- interface: contrail-ifmap
- identity-admin:
- interface: keystone-admin
diff --git a/charms/trusty/contrail-control/templates/contrail-control-nodemgr.conf b/charms/trusty/contrail-control/templates/contrail-control-nodemgr.conf
deleted file mode 100644
index ffea798..0000000
--- a/charms/trusty/contrail-control/templates/contrail-control-nodemgr.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DISCOVERY]
-server = {{ discovery_server }}
-port = {{ discovery_port }}
-
diff --git a/charms/trusty/contrail-control/templates/control-node.conf b/charms/trusty/contrail-control/templates/control-node.conf
deleted file mode 100644
index 73d6bb8..0000000
--- a/charms/trusty/contrail-control/templates/control-node.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULT]
-hostip = {{ host_ip }}
-
-[DISCOVERY]
-server = {{ discovery_server }}
-port = {{ discovery_port }}
-
-[IFMAP]
-user = {{ ifmap_user }}
-password = {{ ifmap_password }}
-
diff --git a/charms/trusty/contrail-control/templates/vnc_api_lib.ini b/charms/trusty/contrail-control/templates/vnc_api_lib.ini
deleted file mode 100644
index fd68487..0000000
--- a/charms/trusty/contrail-control/templates/vnc_api_lib.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[global]
-WEB_SERVER = {{ api_server }}
-WEB_PORT = {{ api_port }}
-
-[auth]
-AUTHN_TYPE = keystone
-AUTHN_PROTOCOL = http
-AUTHN_SERVER = {{ auth_host }}
-AUTHN_PORT = {{ auth_port }}
-AUTHN_URL = /v2.0/tokens
-
diff --git a/charms/trusty/contrail-webui/.bzrignore b/charms/trusty/contrail-webui/.bzrignore
deleted file mode 100644
index 162e2ba..0000000
--- a/charms/trusty/contrail-webui/.bzrignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.venv
-.project
-.pydevproject
-bin
diff --git a/charms/trusty/contrail-webui/.project b/charms/trusty/contrail-webui/.project
deleted file mode 100644
index 90aded7..0000000
--- a/charms/trusty/contrail-webui/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>contrail-webui</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
- <buildCommand>
- <name>org.python.pydev.PyDevBuilder</name>
- <arguments>
- </arguments>
- </buildCommand>
- </buildSpec>
- <natures>
- <nature>org.python.pydev.pythonNature</nature>
- </natures>
-</projectDescription>
diff --git a/charms/trusty/contrail-webui/.pydevproject b/charms/trusty/contrail-webui/.pydevproject
deleted file mode 100644
index 407a838..0000000
--- a/charms/trusty/contrail-webui/.pydevproject
+++ /dev/null
@@ -1,9 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/${PROJECT_DIR_NAME}/hooks</path>
-</pydev_pathproperty>
-</pydev_project>
diff --git a/charms/trusty/contrail-webui/Makefile b/charms/trusty/contrail-webui/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/contrail-webui/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/contrail-webui/README.md b/charms/trusty/contrail-webui/README.md
deleted file mode 100644
index 0f5eaa7..0000000
--- a/charms/trusty/contrail-webui/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This charm provides the Web UI component which contains the
-contrail-web-controller service.
-Only OpenStack Icehouse or newer is supported.
-
-Usage
------
-
-Keystone, Contrail Configuration and Cassandra are prerequisite services to
-deploy.
-
-Once ready, deploy and relate as follows:
-
- juju deploy contrail-webui
- juju add-relation contrail-webui keystone
- juju add-relation contrail-webui:contrail_api contrail-configuration:contrail-api
- juju add-relation contrail-webui:contrail_discovery contrail-configuration:contrail-discovery
- juju add-relation contrail-webui:cassandra cassandra:database
-
-Install Sources
----------------
-
-The version of OpenContrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
-
-Secure HTTPS/SSL Connections
-----------------------------
-
-HTTPS is enabled by default (port 8143) and also set with:
-
- juju set contrail-webui use-https=true
-
-A self-signed X.509 certificate will be generated for SSL use by default, but
-you may specify one with the 'ssl-cert' and 'ssl-key' options. This is easier to
-do using a YAML file:
-
- # config.yaml
- contrail-webui:
- ssl-cert: |
- -----BEGIN CERTIFICATE-----
- ...
- -----END CERTIFICATE-----
- ssl-key: |
- -----BEGIN PRIVATE KEY-----
- ...
- -----END PRIVATE KEY-----
-
- juju set --config config.yaml contrail-webui
-
-High Availability (HA)
-----------------------
-
-Multiple units of this charm can be deployed to support HA deployments:
-
- juju add-unit contrail-webui
-
-Relating to haproxy charm (website relation) allows multiple units to be load
-balanced:
-
- juju add-relation contrail-webui haproxy
diff --git a/charms/trusty/contrail-webui/charm-helpers-sync.yaml b/charms/trusty/contrail-webui/charm-helpers-sync.yaml
deleted file mode 100644
index 0af5672..0000000
--- a/charms/trusty/contrail-webui/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
diff --git a/charms/trusty/contrail-webui/config.yaml b/charms/trusty/contrail-webui/config.yaml
deleted file mode 100644
index 99a3d7c..0000000
--- a/charms/trusty/contrail-webui/config.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-options:
- install-sources:
- type: string
- default: |
- - "ppa:opencontrail/ppa"
- - "ppa:opencontrail/r2.20"
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
- http-port:
- type: int
- default: 8080
- description: Port to listen for HTTP requests on.
- https-port:
- type: int
- default: 8143
- description: Port to listen for HTTPS requests on.
- use-https:
- type: boolean
- default: true
- description: Use HTTPS. HTTP requests will be redirected to HTTPS.
- ssl-cert:
- type: string
- description: |
- PEM encoded X.509 certificate for use in SSL.
- A self-signed certificate will be generated for use if ssl-cert and
- ssl-key are not set.
- ssl-key:
- type: string
- description: |
- PEM encoded private key for use in SSL.
- A self-signed certificate will be generated for use if ssl-cert and
- ssl-key are not set.
- logo-url:
- type: string
- default: ""
- description: |
- Optional URL to an image file with the site logo
- to be used.
- NOTE: it will get downloaded and cached every time
- the config is updated. If empty, the default will
- be used.
- favicon-url:
- type: string
- default: ""
- description: |
- Optional URL to an icon file with the site favicon
- to be used.
- NOTE: it will get downloaded and cached every time
- the config is updated. If empty, the default will
- be used.
diff --git a/charms/trusty/contrail-webui/copyright b/charms/trusty/contrail-webui/copyright
deleted file mode 100644
index 567db82..0000000
--- a/charms/trusty/contrail-webui/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-webui/files/40contrail b/charms/trusty/contrail-webui/files/40contrail
deleted file mode 100644
index d5f2e14..0000000
--- a/charms/trusty/contrail-webui/files/40contrail
+++ /dev/null
@@ -1,4 +0,0 @@
-Explanation: Use contrail version of nodejs
-Package: nodejs
-Pin: version /contrail/
-Pin-Priority: 1001
diff --git a/charms/trusty/contrail-webui/files/contrail-webui b/charms/trusty/contrail-webui/files/contrail-webui
deleted file mode 100644
index cc44e97..0000000
--- a/charms/trusty/contrail-webui/files/contrail-webui
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network Virtualization WebUI
-
-supervisorctl -s unix:///tmp/supervisord_webui.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-webui/files/contrail-webui-contrail.ini b/charms/trusty/contrail-webui/files/contrail-webui-contrail.ini
deleted file mode 100644
index a308c9c..0000000
--- a/charms/trusty/contrail-webui/files/contrail-webui-contrail.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-[program:contrail-webui]
-directory=/usr/src/contrail/contrail-web-core
-command=bash -c "node webServerStart.js"
-priority=420
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-webui-stdout.log
-stderr_logfile=/dev/null
-startretries=10
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
diff --git a/charms/trusty/contrail-webui/files/contrail-webui-middleware b/charms/trusty/contrail-webui/files/contrail-webui-middleware
deleted file mode 100644
index e0d22c2..0000000
--- a/charms/trusty/contrail-webui/files/contrail-webui-middleware
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network Virtualization WebUI Middleware
-
-supervisorctl -s unix:///tmp/supervisord_webui.sock ${1} `basename ${0}`
diff --git a/charms/trusty/contrail-webui/files/contrail-webui-middleware-contrail.ini b/charms/trusty/contrail-webui/files/contrail-webui-middleware-contrail.ini
deleted file mode 100644
index ff12d91..0000000
--- a/charms/trusty/contrail-webui/files/contrail-webui-middleware-contrail.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-[program:contrail-webui-middleware]
-directory=/usr/src/contrail/contrail-web-core
-command=bash -c "node jobServerStart.js"
-priority=420
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-webui-middleware-stdout.log
-stderr_logfile=/dev/null
-startretries=10
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
diff --git a/charms/trusty/contrail-webui/files/contrail-webui-middleware-opencontrail.ini b/charms/trusty/contrail-webui/files/contrail-webui-middleware-opencontrail.ini
deleted file mode 100644
index 05942b4..0000000
--- a/charms/trusty/contrail-webui/files/contrail-webui-middleware-opencontrail.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-[program:contrail-webui-middleware]
-directory=/var/lib/contrail-webui/contrail-web-core
-command=bash -c "node jobServerStart.js"
-priority=420
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-webui-middleware-stdout.log
-stderr_logfile=/dev/null
-startretries=10
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
diff --git a/charms/trusty/contrail-webui/files/contrail-webui-opencontrail.ini b/charms/trusty/contrail-webui/files/contrail-webui-opencontrail.ini
deleted file mode 100644
index 4fa2c57..0000000
--- a/charms/trusty/contrail-webui/files/contrail-webui-opencontrail.ini
+++ /dev/null
@@ -1,14 +0,0 @@
-[program:contrail-webui]
-directory=/var/lib/contrail-webui/contrail-web-core
-command=bash -c "node webServerStart.js"
-priority=420
-autostart=true
-killasgroup=true
-stopsignal=KILL
-stdout_capture_maxbytes=1MB
-redirect_stderr=true
-stdout_logfile=/var/log/contrail/contrail-webui-stdout.log
-stderr_logfile=/dev/null
-startretries=10
-startsecs=5
-exitcodes=0 ; 'expected' exit codes for process (default 0,2)
diff --git a/charms/trusty/contrail-webui/files/supervisor-webui.conf b/charms/trusty/contrail-webui/files/supervisor-webui.conf
deleted file mode 100644
index 37b8e25..0000000
--- a/charms/trusty/contrail-webui/files/supervisor-webui.conf
+++ /dev/null
@@ -1,36 +0,0 @@
-description "Supervisord for VNC web-ui"
-
-start on runlevel [2345]
-stop on runlevel [016]
-limit core unlimited unlimited
-
-# Restart the process if it dies with a signal
-# or exit code not given by the 'normal exit' stanza.
-respawn
-
-# Give up if restart occurs 10 times in 90 seconds.
-respawn limit 10 90
-
-pre-start script
- ulimit -s unlimited
- ulimit -c unlimited
- ulimit -d unlimited
- ulimit -v unlimited
- ulimit -n 4096
-end script
-
-script
- supervisord --nodaemon -c /etc/contrail/supervisord_webui.conf || true
- echo "supervisor-webui start failed...."
- (lsof | grep -i supervisord_webui.sock) || true
- pid=`lsof | grep -i supervisord_webui.sock | cut -d' ' -f3` || true
- if [ "x$pid" != "x" ]; then
- ps uw -p $pid
- fi
-end script
-
-pre-stop script
- supervisorctl -s unix:///tmp/supervisord_webui.sock stop all
- supervisorctl -s unix:///tmp/supervisord_webui.sock shutdown
- /usr/bin/supervisor_killall /etc/contrail/supervisord_webui_files
-end script
diff --git a/charms/trusty/contrail-webui/files/supervisord_webui.conf b/charms/trusty/contrail-webui/files/supervisord_webui.conf
deleted file mode 100644
index f0eb90a..0000000
--- a/charms/trusty/contrail-webui/files/supervisord_webui.conf
+++ /dev/null
@@ -1,140 +0,0 @@
-; Sample supervisor config file.
-;
-; For more information on the config file, please see:
-; http://supervisord.org/configuration.html
-;
-; Note: shell expansion ("~" or "$HOME") is not supported. Environment
-; variables can be expanded using this syntax: "%(ENV_HOME)s".
-
-[unix_http_server]
-file=/tmp/supervisord_webui.sock ; (the path to the socket file)
-chmod=0700 ; socket file mode (default 0700)
-;chown=nobody:nogroup ; socket file uid:gid owner
-;username=user ; (default is no username (open server))
-;password=123 ; (default is no password (open server))
-
-;[inet_http_server] ; inet (TCP) server disabled by default
-;port=localhost:9008 ; Port for analytics (ip_address:port specifier, *:port for all iface)
-;username=user ; (default is no username (open server))
-;password=123 ; (default is no password (open server))
-
-[supervisord]
-logfile=/var/log/contrail/supervisord-webui.log ; (main log file;default $CWD/supervisord.log)
-logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
-logfile_backups=3 ; (num of main logfile rotation backups;default 10)
-loglevel=info ; (log level;default info; others: debug,warn,trace)
-pidfile=/var/run/supervisord_webui.pid ; (supervisord pidfile;default supervisord.pid)
-nodaemon=false ; (start in foreground if true;default false)
-minfds=1024 ; (min. avail startup file descriptors;default 1024)
-minprocs=200 ; (min. avail process descriptors;default 200)
-;umask=022 ; (process file creation umask;default 022)
-;user=chrism ; (default is current user, required if root)
-;identifier=supervisor ; (supervisord identifier, default is 'supervisor')
-;directory=/tmp ; (default is not to cd during start)
-;nocleanup=true ; (don't clean up tempfiles at start;default false)
-childlogdir=/var/log/contrail ; ('AUTO' child log dir, default $TEMP)
-;environment=KEY=value ; (key value pairs to add to environment)
-;strip_ansi=false ; (strip ansi escape codes in logs; def. false)
-
-; the below section must remain in the config file for RPC
-; (supervisorctl/web interface) to work, additional interfaces may be
-; added by defining them in separate rpcinterface: sections
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl=unix:///tmp/supervisord_webui.sock ; use a unix:// URL for a unix socket
-;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
-;username=chris ; should be same as http_username if set
-;password=123 ; should be same as http_password if set
-;prompt=mysupervisor ; cmd line prompt (default "supervisor")
-;history_file=~/.sc_history ; use readline history if available
-
-; The below sample program section shows all possible program subsection values,
-; create one or more 'real' program: sections to be able to control them under
-; supervisor.
-
-;[program:theprogramname]
-;command=/bin/cat ; the program (relative uses PATH, can take args)
-;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
-;numprocs=1 ; number of processes copies to start (def 1)
-;directory=/tmp ; directory to cwd to before exec (def no cwd)
-;umask=022 ; umask for process (default None)
-;priority=999 ; the relative start priority (default 999)
-;autostart=true ; start at supervisord start (default: true)
-;autorestart=unexpected ; whether/when to restart (default: unexpected)
-;startsecs=1 ; number of secs prog must stay running (def. 1)
-;startretries=3 ; max # of serial start failures (default 3)
-;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
-;stopsignal=QUIT ; signal used to kill process (default TERM)
-;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
-;stopasgroup=false ; send stop signal to the UNIX process group (default false)
-;killasgroup=false ; SIGKILL the UNIX process group (def false)
-;user=chrism ; setuid to this UNIX account to run the program
-;redirect_stderr=true ; redirect proc stderr to stdout (default false)
-;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
-;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
-;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
-;stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
-;stdout_events_enabled=false ; emit events on stdout writes (default false)
-;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
-;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
-;stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)
-;stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
-;stderr_events_enabled=false ; emit events on stderr writes (default false)
-;environment=A=1,B=2 ; process environment additions (def no adds)
-;serverurl=AUTO ; override serverurl computation (childutils)
-
-; The below sample eventlistener section shows all possible
-; eventlistener subsection values, create one or more 'real'
-; eventlistener: sections to be able to handle event notifications
-; sent by supervisor.
-
-;[eventlistener:theeventlistenername]
-;command=/bin/eventlistener ; the program (relative uses PATH, can take args)
-;process_name=%(program_name)s ; process_name expr (default %(program_name)s)
-;numprocs=1 ; number of processes copies to start (def 1)
-;events=EVENT ; event notif. types to subscribe to (req'd)
-buffer_size=10000 ; event buffer queue size (default 10)
-;directory=/tmp ; directory to cwd to before exec (def no cwd)
-;umask=022 ; umask for process (default None)
-;priority=-1 ; the relative start priority (default -1)
-;autostart=true ; start at supervisord start (default: true)
-;autorestart=unexpected ; whether/when to restart (default: unexpected)
-;startsecs=1 ; number of secs prog must stay running (def. 1)
-;startretries=3 ; max # of serial start failures (default 3)
-;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
-;stopsignal=QUIT ; signal used to kill process (default TERM)
-;stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
-;stopasgroup=false ; send stop signal to the UNIX process group (default false)
-;killasgroup=false ; SIGKILL the UNIX process group (def false)
-;user=chrism ; setuid to this UNIX account to run the program
-;redirect_stderr=true ; redirect proc stderr to stdout (default false)
-;stdout_logfile=/a/path ; stdout log path, NONE for none; default AUTO
-;stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
-;stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
-;stdout_events_enabled=false ; emit events on stdout writes (default false)
-;stderr_logfile=/a/path ; stderr log path, NONE for none; default AUTO
-;stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
-;stderr_logfile_backups ; # of stderr logfile backups (default 10)
-;stderr_events_enabled=false ; emit events on stderr writes (default false)
-;environment=A=1,B=2 ; process environment additions
-;serverurl=AUTO ; override serverurl computation (childutils)
-
-; The below sample group section shows all possible group values,
-; create one or more 'real' group: sections to create "heterogeneous"
-; process groups.
-
-;[group:contrail-webui]
-;programs=contrail-webui,contrail-webui-middleware; each refers to 'x' in [program:x] definitions
-;priority=999 ; the relative start priority (default 999)
-
-; The [include] section can just contain the "files" setting. This
-; setting can list multiple files (separated by whitespace or
-; newlines). It can also contain wildcards. The filenames are
-; interpreted as relative to this file. Included files *cannot*
-; include files themselves.
-
-[include]
-files = /etc/contrail/supervisord_webui_files/*.ini
-
diff --git a/charms/trusty/contrail-webui/hooks/actions.py b/charms/trusty/contrail-webui/hooks/actions.py
deleted file mode 100644
index a8e3adf..0000000
--- a/charms/trusty/contrail-webui/hooks/actions.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from charmhelpers.core import hookenv
-
-
-def log_start(service_name):
- hookenv.log('{0} starting'.format(service_name))
diff --git a/charms/trusty/contrail-webui/hooks/cassandra-relation-changed b/charms/trusty/contrail-webui/hooks/cassandra-relation-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/cassandra-relation-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/cassandra-relation-joined b/charms/trusty/contrail-webui/hooks/cassandra-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/cassandra-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/__init__.py b/charms/trusty/contrail-webui/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/__init__.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/decorators.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/files.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/fstab.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/hookenv.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index ab53a78..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,898 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peer'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peer``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peer'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/host.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/host.py
deleted file mode 100644
index cb3c527..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/hugepage.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 4aaca3f..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/kernel.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/base.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3f67783..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/strutils.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/sysctl.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/templating.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/core/unitdata.py b/charms/trusty/contrail-webui/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index efd7f9f..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f023b26..0000000
--- a/charms/trusty/contrail-webui/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/contrail-webui/hooks/config-changed b/charms/trusty/contrail-webui/hooks/config-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/config-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/contrail_api-relation-changed b/charms/trusty/contrail-webui/hooks/contrail_api-relation-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/contrail_api-relation-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/contrail_api-relation-joined b/charms/trusty/contrail-webui/hooks/contrail_api-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/contrail_api-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/contrail_discovery-relation-changed b/charms/trusty/contrail-webui/hooks/contrail_discovery-relation-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/contrail_discovery-relation-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/contrail_discovery-relation-joined b/charms/trusty/contrail-webui/hooks/contrail_discovery-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/contrail_discovery-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/identity_admin-relation-changed b/charms/trusty/contrail-webui/hooks/identity_admin-relation-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/identity_admin-relation-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/identity_admin-relation-joined b/charms/trusty/contrail-webui/hooks/identity_admin-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/identity_admin-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/install b/charms/trusty/contrail-webui/hooks/install
deleted file mode 100755
index d981389..0000000
--- a/charms/trusty/contrail-webui/hooks/install
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/python
-
-import shutil
-
-import setup
-setup.pre_install()
-
-from charmhelpers.core import hookenv
-from charmhelpers.fetch import (
- configure_sources,
- apt_upgrade,
- apt_install,
-)
-
-PACKAGES = [ "contrail-web-controller", "nodejs",
- "supervisor", "contrail-utils" ]
-
-def install():
- hookenv.log('Installing contrail-webui')
-
- # set apt preferences
- shutil.copy('files/40contrail', '/etc/apt/preferences.d')
-
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- apt_install(PACKAGES, fatal=True)
- setup.fix_services()
-
-if __name__ == "__main__":
- install()
diff --git a/charms/trusty/contrail-webui/hooks/leader-settings-changed b/charms/trusty/contrail-webui/hooks/leader-settings-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/leader-settings-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/redis-relation-changed b/charms/trusty/contrail-webui/hooks/redis-relation-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/redis-relation-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/redis-relation-joined b/charms/trusty/contrail-webui/hooks/redis-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/redis-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/services.py b/charms/trusty/contrail-webui/hooks/services.py
deleted file mode 100644
index 61c58cb..0000000
--- a/charms/trusty/contrail-webui/hooks/services.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import os
-import socket
-import urllib
-
-import yaml
-
-import actions
-from charmhelpers.core import hookenv
-from charmhelpers.core import services
-from charmhelpers.core import templating
-
-from setup import (
- create_ssl_certificate,
- is_opencontrail,
- write_ssl_certificate
-)
-
-
-CONFIG_FILE = os.path.join(os.sep, 'etc', 'contrail', 'config.global.js')
-
-
-class CassandraRelation(services.RelationContext):
- name = 'cassandra'
- interface = 'cassandra'
- required_keys = ['private-address']
-
-
-class ContrailAPIRelation(services.RelationContext):
- name = 'contrail_api'
- interface = 'contrail-api'
- required_keys = ['private-address', 'port']
-
-
-class ContrailDiscoveryRelation(services.RelationContext):
- name = 'contrail_discovery'
- interface = 'contrail-discovery'
- required_keys = ['private-address', 'port']
-
-
-class KeystoneRelation(services.RelationContext):
- name = 'identity_admin'
- interface = 'keystone-admin'
- required_keys = ['service_hostname', 'service_port', 'service_username',
- 'service_tenant_name', 'service_password']
-
-
-class RedisRelation(services.RelationContext):
- name = 'redis'
- interface = 'redis-master'
- required_keys = ['hostname', 'port']
-
-
-class ContrailWebUIConfig(services.ManagerCallback):
-
- context_contrail = {
- 'webcontroller_path': '/usr/src/contrail/contrail-web-controller',
- 'logo_file': '/usr/src/contrail/contrail-web-core/webroot/img/juniper-networks-logo.png',
- 'favicon_file': '/usr/src/contrail/contrail-web-core/webroot/img/juniper-networks-favicon.ico'
- }
-
- context_opencontrail = {
- 'webcontroller_path': '/var/lib/contrail-webui/contrail-web-controller',
- 'logo_file': '/var/lib/contrail/contrail-web-core/webroot/img/opencontrail-logo.png',
- 'favicon_file': '/var/lib/contrail/contrail-web-core/webroot/img/opencontrail-favicon.ico'
- }
-
- def __call__(self, manager, service_name, event_name):
- config = hookenv.config()
- context = {
- 'config': config
- }
-
- context.update(self.context_opencontrail if is_opencontrail()
- else self.context_contrail)
-
- context.update(ContrailAPIRelation())
- context.update(ContrailDiscoveryRelation())
- context.update(CassandraRelation())
- context.update(KeystoneRelation())
-
- # Redis relation is optional
- redis = RedisRelation()
- if redis.is_ready():
- context.update(redis)
- else:
- context.update({
- 'redis': [{
- 'hostname': '127.0.0.1',
- 'port': '6379'
- }]
- })
-
- # Download logo and favicon or use the cached one
- # if failed, falling back to the defaults
- for target in ('logo', 'favicon'):
- url = context['config']['{0}-url'.format(target)]
- filename = os.path.join(os.sep, 'etc', 'contrail',
- os.path.basename(url))
- context['config']['{0}-filename'.format(target)] = ''
- if url:
- try:
- urllib.urlretrieve(url, filename)
- except IOError:
- pass
-
- try:
- if os.stat(filename).st_size > 0:
- context['config']['{0}-filename'.format(target)] = (
- filename
- )
- except OSError:
- pass
-
- templating.render(
- context=context,
- source='config.global.js.j2',
- target=CONFIG_FILE,
- perms=0o644
- )
-
- templating.render(
- context=context,
- source='contrail-webui-userauth.js',
- target='/etc/contrail/contrail-webui-userauth.js',
- perms=0o640,
- owner='root',
- group='contrail'
- )
-
-
-class SSLConfig(services.ManagerCallback):
- def __call__(self, manager, service_name, event_name):
- if hookenv.is_leader():
- config = hookenv.config()
- cert = config.get('ssl-cert')
- key = config.get('ssl-key')
- if cert and key:
- write_ssl_certificate(cert, key)
- hookenv.leader_set({'ssl-cert': cert, 'ssl-key': key,
- 'ssl-cert-created': ''})
- elif not hookenv.leader_get('ssl-cert-created'):
- cert, key = create_ssl_certificate()
- hookenv.leader_set({'ssl-cert': cert, 'ssl-key': key,
- 'ssl-cert-created': True})
-
-
-class LeaderCallback(services.ManagerCallback):
- def __call__(self, manager, service_name, event_name):
- if not hookenv.is_leader():
- cert = hookenv.leader_get('ssl-cert')
- key = hookenv.leader_get('ssl-key')
- if cert and key:
- write_ssl_certificate(cert, key)
-
-
-class ContrailWebRelation(services.ManagerCallback):
- def __call__(self, manager, service_name, event_name):
- config = hookenv.config()
- name = hookenv.local_unit().replace('/', '-')
- addr = socket.gethostbyname(hookenv.unit_get('private-address'))
- http_port = config['http-port']
- https_port = config['https-port']
- services = [ { 'service_name': 'contrail-webui-http',
- 'service_host': '0.0.0.0',
- 'service_port': http_port,
- 'service_options': [ 'mode http', 'balance leastconn', 'option httpchk GET / HTTP/1.1\\r\\nHost:\\ www', 'stick on src table contrail-webui-https' ],
- 'servers': [ [ name, addr, http_port, 'check' ] ] },
- { 'service_name': 'contrail-webui-https',
- 'service_host': '0.0.0.0',
- 'service_port': https_port,
- 'service_options': [ 'mode tcp', 'balance leastconn', 'stick-table type ip size 10k expire 25h', 'stick on src' ],
- 'servers': [ [ name, addr, https_port, 'check' ] ] } ]
-
- for relation in hookenv.relation_ids('website'):
- hookenv.relation_set(relation, services=yaml.dump(services))
-
-
-def manage():
- config = hookenv.config()
- cassandra = CassandraRelation()
- contrail_api = ContrailAPIRelation()
- contrail_discovery = ContrailDiscoveryRelation()
- keystone = KeystoneRelation()
-
- config_callback = ContrailWebUIConfig()
- ssl_callback = SSLConfig()
- leader_callback = LeaderCallback()
- website_callback = ContrailWebRelation()
-
- manager = services.ServiceManager([
- {
- 'service': 'supervisor-webui',
- 'ports': (config['http-port'], config['https-port']),
- 'required_data': [
- config,
- cassandra,
- contrail_api,
- contrail_discovery,
- keystone,
- ],
- 'data_ready': [
- actions.log_start,
- config_callback,
- ssl_callback,
- leader_callback,
- website_callback,
- ],
- },
- ])
- manager.manage()
diff --git a/charms/trusty/contrail-webui/hooks/setup.py b/charms/trusty/contrail-webui/hooks/setup.py
deleted file mode 100644
index ae1e7e3..0000000
--- a/charms/trusty/contrail-webui/hooks/setup.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os
-import pwd
-import shutil
-import subprocess
-
-from charmhelpers.core.hookenv import log
-
-from charmhelpers.core.host import (
- adduser,
- mkdir,
- service_available,
- service_restart,
- service_stop,
- user_exists
-)
-
-def pre_install():
- """Do any setup required before the install hook."""
- install_charmhelpers()
-
-
-def install_charmhelpers():
- """Install the charmhelpers library, if not present."""
- try:
- import charmhelpers # noqa
- except ImportError:
- import subprocess
- subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
- subprocess.check_call(['pip', 'install', 'charmhelpers'])
-
-
-def create_ssl_certificate():
- base = web_install_dir()
- cert = base + '/contrail-web-core/keys/cs-cert.pem'
- key = base + '/contrail-web-core/keys/cs-key.pem'
- log('Creating self-signed X.509 certificate...')
- subprocess.check_call(['openssl', 'req', '-x509',
- '-subj', '/CN=contrail-juju',
- '-days', '3650', '-newkey', 'rsa:2048', '-nodes',
- '-out', cert, '-keyout', key])
- log('...created self-signed X.509 certificate')
- with open(cert, 'r') as f:
- c = f.read()
- with open(key, 'r') as f:
- k = f.read()
- return c, k
-
-
-def is_opencontrail():
- return os.path.exists('/var/lib/contrail-webui')
-
-
-def fix_permissions():
- """Fix package permissions."""
- os.chmod('/etc/contrail', 0o755)
- os.chown('/etc/contrail', 0, 0)
-
- os.chmod(web_install_dir() + '/contrail-web-core/keys/cs-key.pem', 0o600)
-
-
-def fix_services():
- fix_permissions()
- fix_supervisord()
- fix_webui()
- fix_webui_middleware()
- service_restart('supervisor-webui')
-
-
-def fix_supervisord():
- # setup supervisord
- if not user_exists('contrail'):
- adduser('contrail', system_user=True)
-
- shutil.copy('files/supervisor-webui.conf', '/etc/init')
- shutil.copy('files/supervisord_webui.conf', '/etc/contrail')
- pw = pwd.getpwnam('contrail')
- os.chown('/etc/contrail/supervisord_webui.conf', pw.pw_uid, pw.pw_gid)
- mkdir('/etc/contrail/supervisord_webui_files', owner='contrail',
- group='contrail', perms=0o755)
-
- mkdir('/var/log/contrail', owner='contrail', group='adm', perms=0o750)
-
-
-def fix_webui():
- # disable webui upstart service
- if service_available('contrail-webui-webserver'):
- service_stop('contrail-webui-webserver')
- with open('/etc/init/contrail-webui-webserver.override', 'w') as conf:
- conf.write('manual\n')
-
- # use supervisord config
- conf = 'files/contrail-webui-opencontrail.ini' \
- if is_opencontrail() \
- else 'files/contrail-webui-contrail.ini'
- shutil.copy(conf, '/etc/contrail/supervisord_webui_files/contrail-webui.ini')
- pw = pwd.getpwnam('contrail')
- os.chown('/etc/contrail/supervisord_webui_files/contrail-webui.ini',
- pw.pw_uid, pw.pw_gid)
- shutil.copy('files/contrail-webui', '/etc/init.d')
- os.chmod('/etc/init.d/contrail-webui', 0o755)
-
-
-def fix_webui_middleware():
- # disable webui middleware upstart service
- if service_available('contrail-webui-jobserver'):
- service_stop('contrail-webui-jobserver')
- with open('/etc/init/contrail-webui-jobserver.override', 'w') as conf:
- conf.write('manual\n')
-
- # use supervisord config
- conf = 'files/contrail-webui-middleware-opencontrail.ini' \
- if is_opencontrail() \
- else 'files/contrail-webui-middleware-contrail.ini'
- shutil.copy(conf, '/etc/contrail/supervisord_webui_files/contrail-webui-middleware.ini')
- pw = pwd.getpwnam('contrail')
- os.chown('/etc/contrail/supervisord_webui_files/contrail-webui-middleware.ini',
- pw.pw_uid, pw.pw_gid)
- shutil.copy('files/contrail-webui-middleware', '/etc/init.d')
- os.chmod('/etc/init.d/contrail-webui-middleware', 0o755)
-
-
-def web_install_dir():
- return '/var/lib/contrail-webui' \
- if is_opencontrail() \
- else '/usr/src/contrail'
-
-
-def write_ssl_certificate(cert, key):
- base = web_install_dir()
- with open(base + '/contrail-web-core/keys/cs-cert.pem', 'w') as f:
- f.write(cert)
- with open(base + '/contrail-web-core/keys/cs-key.pem', 'w') as f:
- f.write(key)
diff --git a/charms/trusty/contrail-webui/hooks/start b/charms/trusty/contrail-webui/hooks/start
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/start
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/stop b/charms/trusty/contrail-webui/hooks/stop
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/stop
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/upgrade-charm b/charms/trusty/contrail-webui/hooks/upgrade-charm
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/upgrade-charm
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/website-relation-changed b/charms/trusty/contrail-webui/hooks/website-relation-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/website-relation-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/hooks/website-relation-joined b/charms/trusty/contrail-webui/hooks/website-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/contrail-webui/hooks/website-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/contrail-webui/icon.svg b/charms/trusty/contrail-webui/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/contrail-webui/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/contrail-webui/metadata.yaml b/charms/trusty/contrail-webui/metadata.yaml
deleted file mode 100644
index d1e1a33..0000000
--- a/charms/trusty/contrail-webui/metadata.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-name: contrail-webui
-summary: OpenContrail WebUI Node
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- OpenContrail is a network virtualization solution that provides an overlay
- virtual-network to virtual-machines, containers or network namespaces.
- .
- This charm provides the Web UI node component.
-tags:
- - openstack
-requires:
- cassandra:
- interface: cassandra
- contrail_api:
- interface: contrail-api
- contrail_discovery:
- interface: contrail-discovery
- identity_admin:
- interface: keystone-admin
- redis:
- interface: redis
-provides:
- website:
- interface: http
diff --git a/charms/trusty/contrail-webui/templates/config.global.js.j2 b/charms/trusty/contrail-webui/templates/config.global.js.j2
deleted file mode 100644
index 50a6d02..0000000
--- a/charms/trusty/contrail-webui/templates/config.global.js.j2
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
- */
-
-var config = {};
-
-config.orchestration = {};
-/****************************************************************************
- * Specify Orchestration Model
- * Available models are:
- * - openstack
- * - cloudstack
- * If you do not want to specify any model, set it to 'none'
- *
- *****************************************************************************/
-config.orchestration.Manager = 'openstack';
-
-/****************************************************************************
- * This boolean flag indicates to communicate with Orchestration
- * modules(networkManager, imageManager, computeManager, identityManager,
- * storageManager), should the webServer communicate using the
- * ip/port/authProtocol/apiVersion as specified in this file, or as returned
- * from auth catalog list.
- * Note: config.identityManager.apiVersion is not controlled by this boolean
- * flag.
- *
- * true - These values should be taken from this config
- * file.
- * false - These values should be taken from auth catalog list
- *
- *****************************************************************************/
-config.serviceEndPointFromConfig = true;
-
-/****************************************************************************
- * This boolean flag specifies wheather region list should be taken from config
- * file or from keystone endpoint
- * true - If set as true, then keystone endpoint is taken from
- * config.regions
- * false - If set as false, then keystone endpoint is taken from
- * config.identityManager
- *
- ****************************************************************************/
-config.regionsFromConfig = false;
-
-/****************************************************************************
- * Below are the configs for Api Server and analytics Service type & name as
- * provisioned in keystone
- *
- * apiServiceType - Service Type for apiServer, default value is apiServer
- * opServiceType - Service Type for analytics, default value is opServer
- *
- * Note: If there are multiple api server or analytices nodes in a specific
- * region, then provision service type/name as ApiServer0, ApiServer1,
- * ApiServer2 etc, similarly for analytics node: OpServer0, OpServer1,
- * OpServer2 etc.
- *
- ****************************************************************************/
-config.endpoints = {};
-config.endpoints.apiServiceType = 'ApiServer';
-config.endpoints.opServiceType = 'OpServer';
-
-/****************************************************************************
- * Mapping to region name with keystone endpoint
- *
- * For example:
- * config.regions.RegionOne = 'http://nodeIp:5000/v2.0';
- * config.regions.RegionTwo = 'http://nodeIp:5000/v3';
- *
- ****************************************************************************/
-config.regions = {};
-config.regions.RegionOne = 'http://{{ identity_admin[0]['service_hostname'] }}:{{ identity_admin[0]['service_port'] }}/v2.0';
-
-/****************************************************************************
- * This boolean flag indicates if serviceEndPointFromConfig is set as false,
- * then to take IP/Port/Protocol/Version information from auth catalog,
- * should publicURL OR internalURL will be used.
- *
- * true - publicURL in endpoint will be used to retrieve IP/Port/Protocol/
- * Version information
- * false - internalURL in endpoint will be used to retrieve
- * IP/Port/Protocol/Version information
- *
- * NOTE: if config.serviceEndPointFromConfig is set as true, then this flag
- * does not have any effect.
- *
- *****************************************************************************/
-config.serviceEndPointTakePublicURL = true;
-
-/****************************************************************************
- * Below are the config options for all Orchestration Modules below:
- * - networkManager
- * - imageManager
- * - computeManager
- * - identityManager
- * - storageManager
- * - cnfg
- * - analytics
- *
- * Options:
- * ip:
- * IP to connect to for this Server.
- * port:
- * Port to connect to for this server
- * authProtocol:
- * Specify authProtocol either 'http' or 'https'
- * apiVersion:
- * REST API Version for this server to connect to.
- * Specify a list of Versions in array notation.
- * Below are the supported list of apiVersion for the modules as of now:
- * imageManager - ['v1', 'v2']
- * computeManager - ['v1.1', 'v2']
- * identityManager - ['v2.0']
- * storageManager - ['v1']
- *
- * Not applicable for cnfg/analytics as of now
- * strictSSL:
- * If true, requires certificates to be valid
- * ca:
- * An authority certificate to check the remote host against,
- * if you do not want to specify then use ''
- *****************************************************************************/
-config.networkManager = {};
-config.networkManager.ip = '127.0.0.1';
-config.networkManager.port = '9696'
-config.networkManager.authProtocol = 'http';
-config.networkManager.apiVersion = [];
-config.networkManager.strictSSL = false;
-config.networkManager.ca = '';
-
-config.imageManager = {};
-config.imageManager.ip = '127.0.0.1';
-config.imageManager.port = '9292';
-config.imageManager.authProtocol = 'http';
-config.imageManager.apiVersion = ['v1', 'v2'];
-config.imageManager.strictSSL = false;
-config.imageManager.ca = '';
-
-config.computeManager = {};
-config.computeManager.ip = '127.0.0.1';
-config.computeManager.port = '8774';
-config.computeManager.authProtocol = 'http';
-config.computeManager.apiVersion = ['v1.1', 'v2'];
-config.computeManager.strictSSL = false;
-config.computeManager.ca = '';
-
-config.identityManager = {};
-config.identityManager.ip = '{{ identity_admin[0]['service_hostname'] }}';
-config.identityManager.port = '{{ identity_admin[0]['service_port'] }}';
-config.identityManager.authProtocol = 'http';
-/******************************************************************************
- * Note: config.identityManager.apiVersion is not controlled by boolean flag
- * config.serviceEndPointFromConfig. If specified apiVersion here, then these
- * API versions will be used while using REST API to identityManager.
- * If want to use with default apiVersion(v2.0), then can specify it as
- * empty array.
- ******************************************************************************/
-config.identityManager.apiVersion = ['v2.0'];
-config.identityManager.strictSSL = false;
-config.identityManager.ca = '';
-
-config.storageManager = {};
-config.storageManager.ip = '127.0.0.1';
-config.storageManager.port = '8776';
-config.storageManager.authProtocol = 'http';
-config.storageManager.apiVersion = ['v1'];
-config.storageManager.strictSSL = false;
-config.storageManager.ca = '';
-
-// VNConfig API server and port.
-config.cnfg = {};
-config.cnfg.server_ip = '{{ contrail_api[0]['vip'] if contrail_api[0]['vip'] else contrail_api[0]['private-address'] }}';
-config.cnfg.server_port = '{{ contrail_api[0]['port'] }}';
-config.cnfg.authProtocol = 'http';
-config.cnfg.strictSSL = false;
-config.cnfg.ca = '';
-
-// Analytics API server and port.
-config.analytics = {};
-config.analytics.server_ip = '127.0.0.1';
-config.analytics.server_port = '8081';
-config.analytics.authProtocol = 'http';
-config.analytics.strictSSL = false;
-config.analytics.ca = '';
-
-// vcenter related parameters
-config.vcenter = {};
-config.vcenter.server_ip = '127.0.0.1'; //vCenter IP
-config.vcenter.server_port = '443'; //Port
-config.vcenter.authProtocol = 'https'; //http or https
-config.vcenter.datacenter = 'vcenter'; //datacenter name
-config.vcenter.dvsswitch = 'vswitch'; //dvsswitch name
-config.vcenter.strictSSL = false; //Validate the certificate or ignore
-config.vcenter.ca = ''; //specify the certificate key file
-config.vcenter.wsdl = '/var/lib/contrail-webui/contrail-web-core/webroot/js/vim.wsdl';
-
-/* Discovery Service */
-config.discoveryService = {};
-config.discoveryService.server_ip = '{{ contrail_discovery[0]['vip'] if contrail_discovery[0]['vip'] else contrail_discovery[0]['private-address'] }}';
-config.discoveryService.server_port = '{{ contrail_discovery[0]['port'] }}';
-/* Specifiy true if subscription to discovery server should be enabled, else
- * specify false. Other than true/false value here is treated as true
- */
-config.discoveryService.enable = true;
-
-/* Job Server */
-config.jobServer = {};
-config.jobServer.server_ip = '127.0.0.1';
-config.jobServer.server_port = '3000';
-
-/* Upload/Download Directory */
-config.files = {};
-config.files.download_path = '/tmp';
-
-/* Cassandra Server */
-config.cassandra = {};
-config.cassandra.server_ips = ['{{ cassandra|join('\', \'', attribute='private-address') }}'];
-config.cassandra.server_port = '{{ cassandra[0]['rpc_port'] if cassandra[0]['rpc_port'] else cassandra[0]['port'] }}';
-config.cassandra.enable_edit = false;
-
-/* KUE Job Scheduler */
-config.kue = {};
-config.kue.ui_port = '3002'
-
-/* IP List to listen on */
-config.webui_addresses = ['0.0.0.0'];
-
-/* Is insecure access to WebUI?
- * If set as false, then all http request will be redirected
- * to https, if set true, then no https request will be processed, but only http
- * request
- */
-config.insecure_access = {{ ((not config['use-https']) ~ '')|lower }};
-
-// HTTP port for NodeJS Server.
-config.http_port = '{{ config['http-port']|default('8080', true) }}';
-
-// HTTPS port for NodeJS Server.
-config.https_port = '{{ config['https-port']|default('8143', true) }}';
-
-// Activate/Deactivate Login.
-config.require_auth = false;
-
-/* Number of node worker processes for cluster. */
-config.node_worker_count = 1;
-
-/* Number of Parallel Active Jobs with same type */
-config.maxActiveJobs = 10;
-
-/* Redis DB index for Web-UI */
-config.redisDBIndex = 3;
-
-/* WebUI Redis Server */
-config.redis_server_port = '{{ redis[0]['port'] }}';
-config.redis_server_ip = '{{ redis[0]['hostname'] }}';
-config.redis_dump_file = '/var/lib/redis/dump-webui.rdb';
-config.redis_password = '';
-
-/* Logo File: Use complete path of logo file location */
-config.logo_file = '{{ config['logo-filename'] if config['logo-filename'] else logo_file }}';
-
-/* Favicon File: Use complete path of favicon file location */
-config.favicon_file = '{{ config['favicon-filename'] if config['favicon-filename'] else favicon_file }}';
-
-config.featurePkg = {};
-/* Add new feature Package Config details below */
-config.featurePkg.webController = {};
-config.featurePkg.webController.path = '{{ webcontroller_path }}';
-config.featurePkg.webController.enable = true;
-
-/* Enable/disable Stat Query Links in Sidebar*/
-config.qe = {};
-config.qe.enable_stat_queries = false;
-
-/* Configure level of logs, supported log levels are:
- debug, info, notice, warning, error, crit, alert, emerg
- */
-config.logs = {};
-config.logs.level = 'debug';
-
-/******************************************************************************
- * Boolean flag getDomainProjectsFromApiServer indicates wheather the project
- * list should come from API Server or Identity Manager.
- * If Set
- * - true, then project list will come from API Server
- * - false, then project list will come from Identity Manager
- * Default: false
- *
- ******************************************************************************/
-config.getDomainProjectsFromApiServer = false;
-/*****************************************************************************
- * Boolean flag L2_enable indicates the default forwarding-mode of a network.
- * Allowed values : true / false
- * Set this flag to true if all the networks are to be L2 networks,
- * set to false otherwise.
- *****************************************************************************/
-config.network = {};
-config.network.L2_enable = false;
-
-/******************************************************************************
- * Boolean flag getDomainsFromApiServer indicates wheather the domain
- * list should come from API Server or Identity Manager.
- * If Set
- * - true, then domain list will come from API Server
- * - false, then domain list will come from Identity Manager
- * Default: true
- * NOTE: if config.identityManager.apiVersion is set as v2.0, then this flag
- * does not have any effect, in that case the domain list is retrieved
- * from API Server.
- *
- *****************************************************************************/
-config.getDomainsFromApiServer = false;
-
-// Export this as a module.
-module.exports = config;
-
diff --git a/charms/trusty/contrail-webui/templates/contrail-webui-userauth.js b/charms/trusty/contrail-webui/templates/contrail-webui-userauth.js
deleted file mode 100644
index f37a348..0000000
--- a/charms/trusty/contrail-webui/templates/contrail-webui-userauth.js
+++ /dev/null
@@ -1,7 +0,0 @@
-var auth = {};
-auth.admin_user = '{{ identity_admin[0]['service_username'] }}';
-auth.admin_password = '{{ identity_admin[0]['service_password'] }}';
-auth.admin_tenant_name = '{{ identity_admin[0]['service_tenant_name'] }}';
-
-module.exports = auth;
-
diff --git a/charms/trusty/kafka/.bzr/README b/charms/trusty/kafka/.bzr/README
deleted file mode 100644
index f82dc1c..0000000
--- a/charms/trusty/kafka/.bzr/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a Bazaar control directory.
-Do not change any files in this directory.
-See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/kafka/.bzr/branch-format b/charms/trusty/kafka/.bzr/branch-format
deleted file mode 100644
index 9eb09b7..0000000
--- a/charms/trusty/kafka/.bzr/branch-format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/kafka/.bzr/branch/format b/charms/trusty/kafka/.bzr/branch/format
deleted file mode 100644
index b391ffd..0000000
--- a/charms/trusty/kafka/.bzr/branch/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG Branch Reference Format 1
diff --git a/charms/trusty/kafka/.bzr/branch/location b/charms/trusty/kafka/.bzr/branch/location
deleted file mode 100644
index 2f40eef..0000000
--- a/charms/trusty/kafka/.bzr/branch/location
+++ /dev/null
@@ -1 +0,0 @@
-http://bazaar.launchpad.net/~sdn-charmers/charms/trusty/apache-kafka/trunk/ \ No newline at end of file
diff --git a/charms/trusty/kafka/.bzr/checkout/conflicts b/charms/trusty/kafka/.bzr/checkout/conflicts
deleted file mode 100644
index 0dc2d3a..0000000
--- a/charms/trusty/kafka/.bzr/checkout/conflicts
+++ /dev/null
@@ -1 +0,0 @@
-BZR conflict list format 1
diff --git a/charms/trusty/kafka/.bzr/checkout/dirstate b/charms/trusty/kafka/.bzr/checkout/dirstate
deleted file mode 100644
index 9d2ef8a..0000000
--- a/charms/trusty/kafka/.bzr/checkout/dirstate
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/kafka/.bzr/checkout/format b/charms/trusty/kafka/.bzr/checkout/format
deleted file mode 100644
index e0261c7..0000000
--- a/charms/trusty/kafka/.bzr/checkout/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/kafka/.bzr/checkout/views b/charms/trusty/kafka/.bzr/checkout/views
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/kafka/.bzr/checkout/views
+++ /dev/null
diff --git a/charms/trusty/kafka/LICENSE b/charms/trusty/kafka/LICENSE
deleted file mode 100644
index f433b1a..0000000
--- a/charms/trusty/kafka/LICENSE
+++ /dev/null
@@ -1,177 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/charms/trusty/kafka/README.md b/charms/trusty/kafka/README.md
deleted file mode 100644
index 31e8e4f..0000000
--- a/charms/trusty/kafka/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-## Overview
-Apache Kafka is an open-source message broker project developed by the Apache
-Software Foundation written in Scala. The project aims to provide a unified,
-high-throughput, low-latency platform for handling real-time data feeds. Learn
-more at [kafka.apache.org](http://kafka.apache.org/).
-
-
-## Usage
-Kafka requires the Zookeeper distributed coordination service. Deploy and
-relate them as follows:
-
- juju deploy apache-zookeeper zookeeper
- juju deploy apache-kafka kafka
- juju add-relation kafka zookeeper
-
-Once deployed, we can list the zookeeper servers that our kafka brokers
-are connected to. The following will list `<ip>:<port>` information for each
-zookeeper unit in the environment (e.g.: `10.0.3.221:2181`).
-
- juju action do kafka/0 list-zks
- juju action fetch <id> # <-- id from above command
-
-We can create a Kafka topic with:
-
- juju action do kafka/0 create-topic topic=<topic_name> \
- partitions=<#> replication=<#>
- juju action fetch <id> # <-- id from above command
-
-We can list topics with:
-
- juju action do kafka/0 list-topics
- juju action fetch <id> # <-- id from above command
-
-We can write to a topic with:
-
- juju action do kafka/0 write-topic topic=<topic_name> data=<data>
- juju action fetch <id> # <-- id from above command
-
-We can read from a topic with:
-
- juju action do kafka/0 read-topic topic=<topic_name> partition=<#>
- juju action fetch <id> # <-- id from above command
-
-And finally, we can delete a topic with:
-
- juju action do kafka/0 delete-topic topic=<topic_name>
- juju action fetch <id> # <-- id from above command
-
-## Deploying in Network-Restricted Environments
-This charm can be deployed in environments with limited network access. To
-deploy in this environment, you will need a local mirror to serve the packages
-and resources required by this charm.
-
-### Mirroring Packages
-You can setup a local mirror for apt packages using squid-deb-proxy.
-For instructions on configuring juju to use this, see the
-[Juju Proxy Documentation](https://juju.ubuntu.com/docs/howto-proxies.html).
-
-### Mirroring Resources
-In addition to apt packages, this charm requires a few binary resources
-which are normally hosted on Launchpad. If access to Launchpad is not
-available, the `jujuresources` library makes it easy to create a mirror
-of these resources:
-
- sudo pip install jujuresources
- juju-resources fetch --all /path/to/resources.yaml -d /tmp/resources
- juju-resources serve -d /tmp/resources
-
-This will fetch all of the resources needed by this charm and serve them via a
-simple HTTP server. The output from `juju-resources serve` will give you a
-URL that you can set as the `resources_mirror` config option for this charm.
-Setting this option will cause all resources required by this charm to be
-downloaded from the configured URL.
-
-
-## Contact Information
-- <bigdata-dev@lists.launchpad.net>
-
-
-## Help
-- [Apache Kafka home page](http://kafka.apache.org/)
-- [Apache Kafka issue tracker](https://issues.apache.org/jira/browse/KAFKA)
-- [Juju mailing list](https://lists.ubuntu.com/mailman/listinfo/juju)
-- [Juju community](https://jujucharms.com/community)
diff --git a/charms/trusty/kafka/actions.yaml b/charms/trusty/kafka/actions.yaml
deleted file mode 100644
index 6026d0b..0000000
--- a/charms/trusty/kafka/actions.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-create-topic:
- description: Create a new Kafka topic
- params:
- topic:
- type: string
- description: Topic name
- partitions:
- type: integer
- description: Number of partitions for the topic being created
- replication:
- type: integer
- description: Replication factor for each partition in the topic
- required: [topic, partitions, replication]
- additionalProperties: false
-delete-topic:
- description: Delete a Kafka topic
- params:
- topic:
- type: string
- description: Topic name
- required: [topic]
- additionalProperties: false
-list-topics:
- description: List all Kafka topics
-list-zks:
- description: List ip:port info for connected Zookeeper servers
-read-topic:
- description: Consume an existing kafka topic
- params:
- topic:
- type: string
- description: Topic name
- partition:
- type: integer
- description: Partition to consume
- required: [topic, partition]
- additionalProperties: false
-write-topic:
- description: Write to a kafka topic
- params:
- topic:
- type: string
- description: Topic name
- data:
- type: string
- description: Data to write to topic
- required: [topic, data]
- additionalProperties: false
diff --git a/charms/trusty/kafka/actions/create-topic b/charms/trusty/kafka/actions/create-topic
deleted file mode 100755
index 4910430..0000000
--- a/charms/trusty/kafka/actions/create-topic
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-import sys
-
-try:
- from charmhelpers.core import hookenv
- from charmhelpers.core import unitdata
- from jujubigdata import utils
- from jujubigdata.relations import Zookeeper
- charm_ready = unitdata.kv().get('charm.active', False)
-except ImportError:
- charm_ready = False
-
-if not charm_ready:
- # might not have hookenv.action_fail available yet
- from subprocess import call
- call(['action-fail', 'Kafka service not yet ready'])
-
-# Grab the business
-topic_name = hookenv.action_get('topic')
-topic_partitions = hookenv.action_get('partitions')
-topic_replication = hookenv.action_get('replication')
-
-# Create the topic if we've got zookeepers; otherwise fail.
-if Zookeeper().connected_units() and Zookeeper().is_ready():
- zks = []
- for unit, data in Zookeeper().filtered_data().items():
- ip = utils.resolve_private_address(data['private-address'])
- zks.append("%s:%s" % (ip, data['port']))
- zks.sort()
- zookeepers = ",".join(zks)
- output = utils.run_as('kafka', 'kafka-topics.sh',
- '--zookeeper', zookeepers, '--create',
- '--topic', topic_name,
- '--partitions', topic_partitions,
- '--replication-factor', topic_replication,
- capture_output=True)
- hookenv.action_set({'output': output})
-else:
- hookenv.action_fail('Zookeeper relation is not present/ready')
- sys.exit()
diff --git a/charms/trusty/kafka/actions/delete-topic b/charms/trusty/kafka/actions/delete-topic
deleted file mode 100755
index b56f004..0000000
--- a/charms/trusty/kafka/actions/delete-topic
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-import sys
-
-try:
- from charmhelpers.core import hookenv
- from charmhelpers.core import unitdata
- from jujubigdata import utils
- from jujubigdata.relations import Zookeeper
- charm_ready = unitdata.kv().get('charm.active', False)
-except ImportError:
- charm_ready = False
-
-if not charm_ready:
- # might not have hookenv.action_fail available yet
- from subprocess import call
- call(['action-fail', 'Kafka service not yet ready'])
-
-# Grab the business
-topic_name = hookenv.action_get('topic')
-
-# Delete the topic if we've got zookeepers; otherwise fail.
-if Zookeeper().connected_units() and Zookeeper().is_ready():
- zks = []
- for unit, data in Zookeeper().filtered_data().items():
- ip = utils.resolve_private_address(data['private-address'])
- zks.append("%s:%s" % (ip, data['port']))
- zks.sort()
- zookeepers = ",".join(zks)
- output = utils.run_as('kafka', 'kafka-topics.sh',
- '--zookeeper', zookeepers, '--delete',
- '--topic', topic_name,
- capture_output=True)
- hookenv.action_set({'output': output})
-else:
- hookenv.action_fail('Zookeeper relation is not present/ready')
- sys.exit()
diff --git a/charms/trusty/kafka/actions/list-topics b/charms/trusty/kafka/actions/list-topics
deleted file mode 100755
index 629d2b4..0000000
--- a/charms/trusty/kafka/actions/list-topics
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-import sys
-
-try:
- from charmhelpers.core import hookenv
- from charmhelpers.core import unitdata
- from jujubigdata import utils
- from jujubigdata.relations import Zookeeper
- charm_ready = unitdata.kv().get('charm.active', False)
-except ImportError:
- charm_ready = False
-
-if not charm_ready:
- # might not have hookenv.action_fail available yet
- from subprocess import call
- call(['action-fail', 'Kafka service not yet ready'])
-
-if Zookeeper().connected_units() and Zookeeper().is_ready():
- zks = []
- for unit, data in Zookeeper().filtered_data().items():
- ip = utils.resolve_private_address(data['private-address'])
- zks.append("%s:%s" % (ip, data['port']))
- zks.sort()
- zookeepers = ",".join(zks)
- output = utils.run_as('kafka', '/usr/lib/kafka/bin/kafka-topics.sh',
- '--zookeeper', zookeepers, '--list',
- capture_output=True)
- hookenv.action_set({'topics': output})
-else:
- hookenv.action_fail('Zookeeper relation is not present/ready')
- sys.exit()
diff --git a/charms/trusty/kafka/actions/list-zks b/charms/trusty/kafka/actions/list-zks
deleted file mode 100755
index 9de9e9a..0000000
--- a/charms/trusty/kafka/actions/list-zks
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env python
-import sys
-
-try:
- from charmhelpers.core import hookenv
- from charmhelpers.core import unitdata
- from jujubigdata import utils
- from jujubigdata.relations import Zookeeper
- charm_ready = unitdata.kv().get('charm.active', False)
-except ImportError:
- charm_ready = False
-
-if not charm_ready:
- # might not have hookenv.action_fail available yet
- from subprocess import call
- call(['action-fail', 'Kafka service not yet ready'])
-
-if Zookeeper().connected_units() and Zookeeper().is_ready():
- zks = []
- for unit, data in Zookeeper().filtered_data().items():
- ip = utils.resolve_private_address(data['private-address'])
- zks.append("%s:%s" % (ip, data['port']))
- zks.sort()
- zookeepers = ",".join(zks)
- hookenv.action_set({'zookeepers': zookeepers})
-else:
- hookenv.action_fail('Zookeeper relation is not present/ready')
- sys.exit()
diff --git a/charms/trusty/kafka/actions/read-topic b/charms/trusty/kafka/actions/read-topic
deleted file mode 100755
index 9f59396..0000000
--- a/charms/trusty/kafka/actions/read-topic
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-#pylint: disable=C0103
-try:
- from charmhelpers.core import hookenv
- from charmhelpers.core import unitdata
- import jujubigdata
- from jujubigdata import utils
- charm_ready = unitdata.kv().get('charm.active', False)
-except ImportError:
- charm_ready = False
-
-if not charm_ready:
- # might not have hookenv.action_fail available yet
- from subprocess import call
- call(['action-fail', 'Kafka service not yet ready'])
-
-kafka_reqs = ['vendor', 'packages', 'groups', 'users', 'dirs', 'ports']
-dist_config = jujubigdata.utils.DistConfig(filename='dist.yaml',
- required_keys=kafka_reqs)
-
-# Grab the business
-topic_name = hookenv.action_get('topic')
-topic_partition = hookenv.action_get('partition')
-
-output = utils.run_as(
- 'kafka', 'kafka-simple-consumer-shell.sh',
- '--broker-list', '{}:{}'.format(
- hookenv.unit_private_ip(),
- dist_config.port('kafka'),
- ),
- '--topic', topic_name,
- '--partition', topic_partition,
- '--no-wait-at-logend',
- capture_output=True)
-hookenv.action_set({'output': output})
diff --git a/charms/trusty/kafka/actions/write-topic b/charms/trusty/kafka/actions/write-topic
deleted file mode 100755
index fce8e1b..0000000
--- a/charms/trusty/kafka/actions/write-topic
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-#pylint: disable=C0103
-
-try:
- from charmhelpers.core import hookenv
- from charmhelpers.core import unitdata
- import jujubigdata
- from jujubigdata import utils
- charm_ready = unitdata.kv().get('charm.active', False)
-except ImportError:
- charm_ready = False
-
-if not charm_ready:
- # might not have hookenv.action_fail available yet
- from subprocess import call
- call(['action-fail', 'Kafka service not yet ready'])
-
-
-kafka_reqs = ['vendor', 'packages', 'groups', 'users', 'dirs', 'ports']
-dist_config = jujubigdata.utils.DistConfig(filename='dist.yaml',
- required_keys=kafka_reqs)
-
-# Grab the business
-topic_name = hookenv.action_get('topic')
-data = hookenv.action_get('data')
-
-output = utils.run_as(
- 'kafka', 'kafka-console-producer.sh',
- '--broker-list', '{}:{}'.format(
- hookenv.unit_private_ip(),
- dist_config.port('kafka'),
- ),
- '--topic', topic_name,
- capture_output=True,
- input=data)
-hookenv.action_set({'output': output})
diff --git a/charms/trusty/kafka/config.yaml b/charms/trusty/kafka/config.yaml
deleted file mode 100644
index f2483b0..0000000
--- a/charms/trusty/kafka/config.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-options:
- resources_mirror:
- type: string
- default: ''
- description: |
- URL from which to fetch resources (e.g., Hadoop binaries) instead of Launchpad.
-
diff --git a/charms/trusty/kafka/copyright b/charms/trusty/kafka/copyright
deleted file mode 100644
index e900b97..0000000
--- a/charms/trusty/kafka/copyright
+++ /dev/null
@@ -1,16 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
-License: Apache License 2.0
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
- .
- http://www.apache.org/licenses/LICENSE-2.0
- .
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/charms/trusty/kafka/dist.yaml b/charms/trusty/kafka/dist.yaml
deleted file mode 100644
index e8f1a37..0000000
--- a/charms/trusty/kafka/dist.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-# This file contains values that are likely to change per distribution.
-# The aim is to make it easier to update / extend the charms with
-# minimal changes to the shared code in the jujubigdata library.
-vendor: 'apache'
-packages:
- - 'openjdk-7-jdk'
-groups:
- - 'hadoop'
-users:
- kafka:
- groups: ['hadoop']
-dirs:
- kafka:
- path: '/usr/lib/kafka'
- kafka_conf:
- path: '/etc/kafka/conf'
- kafka_app_logs:
- path: '/var/log/kafka'
- owner: 'kafka'
- kafka_data_logs:
- path: '/var/lib/kafka'
- owner: 'kafka'
-ports:
- # Ports that need to be exposed, overridden, or manually specified.
- # Only expose ports serving a UI or external API (i.e., namenode and
- # resourcemanager). Communication among units within the cluster does
- # not need ports to be explicitly opened.
- kafka:
- port: 9092
- exposed_on: 'kafka'
diff --git a/charms/trusty/kafka/hooks/callbacks.py b/charms/trusty/kafka/hooks/callbacks.py
deleted file mode 100644
index bac68cb..0000000
--- a/charms/trusty/kafka/hooks/callbacks.py
+++ /dev/null
@@ -1,181 +0,0 @@
-import os
-from socket import gaierror, gethostbyname, gethostname
-from subprocess import Popen, check_call
-
-import jujuresources
-from charmhelpers.core import hookenv, templating
-from charmhelpers.core import host
-from charmhelpers.core import unitdata
-from jujubigdata import utils
-from jujubigdata.relations import Zookeeper
-
-
-# Extended status support
-# We call update_blocked_status from the "requires" section of our service
-# block, so be sure to return True. Otherwise, we'll block the "requires"
-# and never move on to callbacks. The other status update methods are called
-# from the "callbacks" section and therefore don't need to return True.
-def update_blocked_status():
- if unitdata.kv().get('charm.active', False):
- return True
- if not Zookeeper().connected_units():
- hookenv.status_set('blocked', 'Waiting for relation to apache-zookeeper')
- elif not Zookeeper().is_ready():
- hookenv.status_set('waiting', 'Waiting for Zookeeper to become ready')
- return True
-
-
-def update_working_status():
- if unitdata.kv().get('charm.active', False):
- hookenv.status_set('maintenance', 'Updating configuration')
- return
- hookenv.status_set('maintenance', 'Setting up Apache Kafka')
-
-
-def update_active_status():
- unitdata.kv().set('charm.active', True)
- hookenv.status_set('active', 'Ready')
-
-
-def clear_active_flag():
- unitdata.kv().set('charm.active', False)
-
-
-# Main Kafka class for callbacks
-class Kafka(object):
- def __init__(self, dist_config):
- self.dist_config = dist_config
- self.resources = {
- 'kafka': 'kafka-%s' % host.cpu_arch(),
- }
- self.verify_resources = utils.verify_resources(*self.resources.values())
-
- def fix_hostname(self):
- # ensure hostname is resolvable
- hostname = gethostname()
- try:
- gethostbyname(hostname)
- except gaierror:
- check_call(['sed', '-E', '-i', '-e',
- '/127.0.0.1[[:blank:]]+/a \\\n127.0.1.1 ' + hostname,
- '/etc/hosts'])
-
- def is_installed(self):
- return unitdata.kv().get('kafka.installed')
-
- def install(self, force=False):
- if not force and self.is_installed():
- return
- self.fix_hostname()
- self.dist_config.add_users()
- self.dist_config.add_dirs()
- self.dist_config.add_packages()
- jujuresources.install(self.resources['kafka'],
- destination=self.dist_config.path('kafka'),
- skip_top_level=True)
- self.setup_kafka_config()
- unitdata.kv().set('kafka.installed', True)
-
- def setup_kafka_config(self):
- '''
- copy the default configuration files to kafka_conf property
- defined in dist.yaml
- '''
- default_conf = self.dist_config.path('kafka') / 'config'
- kafka_conf = self.dist_config.path('kafka_conf')
- kafka_conf.rmtree_p()
- default_conf.copytree(kafka_conf)
- # Now remove the conf included in the tarball and symlink our real conf
- # dir. we've seen issues where kafka still looks for config in
- # KAFKA_HOME/config.
- default_conf.rmtree_p()
- kafka_conf.symlink(default_conf)
-
- # Configure immutable bits
- kafka_bin = self.dist_config.path('kafka') / 'bin'
- with utils.environment_edit_in_place('/etc/environment') as env:
- if kafka_bin not in env['PATH']:
- env['PATH'] = ':'.join([env['PATH'], kafka_bin])
- env['LOG_DIR'] = self.dist_config.path('kafka_app_logs')
-
- # note: we set the advertised.host.name below to the public_address
- # to ensure that external (non-Juju) clients can connect to Kafka
- public_address = hookenv.unit_get('public-address')
- private_ip = utils.resolve_private_address(hookenv.unit_get('private-address'))
- kafka_server_conf = self.dist_config.path('kafka_conf') / 'server.properties'
- service, unit_num = os.environ['JUJU_UNIT_NAME'].split('/', 1)
- utils.re_edit_in_place(kafka_server_conf, {
- r'^broker.id=.*': 'broker.id=%s' % unit_num,
- r'^port=.*': 'port=%s' % self.dist_config.port('kafka'),
- r'^log.dirs=.*': 'log.dirs=%s' % self.dist_config.path('kafka_data_logs'),
- r'^#?advertised.host.name=.*': 'advertised.host.name=%s' % public_address,
- })
-
- kafka_log4j = self.dist_config.path('kafka_conf') / 'log4j.properties'
- utils.re_edit_in_place(kafka_log4j, {
- r'^kafka.logs.dir=.*': 'kafka.logs.dir=%s' % self.dist_config.path('kafka_app_logs'),
- })
-
- # fix for lxc containers and some corner cases in manual provider
- # ensure that public_address is resolvable internally by mapping it to the private IP
- utils.update_etc_hosts({private_ip: public_address})
-
- templating.render(
- 'upstart.conf',
- '/etc/init/kafka.conf',
- context={
- 'kafka_conf': self.dist_config.path('kafka_conf'),
- 'kafka_bin': '{}/bin'.format(self.dist_config.path('kafka'))
- },
- )
-
- def configure_kafka(self):
- # Get ip:port data from our connected zookeepers
- if Zookeeper().connected_units() and Zookeeper().is_ready():
- zks = []
- for unit, data in Zookeeper().filtered_data().items():
- ip = utils.resolve_private_address(data['private-address'])
- zks.append("%s:%s" % (ip, data['port']))
- zks.sort()
- zk_connect = ",".join(zks)
-
- # update consumer props
- cfg = self.dist_config.path('kafka_conf') / 'consumer.properties'
- utils.re_edit_in_place(cfg, {
- r'^zookeeper.connect=.*': 'zookeeper.connect=%s' % zk_connect,
- })
-
- # update server props
- cfg = self.dist_config.path('kafka_conf') / 'server.properties'
- utils.re_edit_in_place(cfg, {
- r'^zookeeper.connect=.*': 'zookeeper.connect=%s' % zk_connect,
- })
- else:
- # if we have no zookeepers, make sure kafka is stopped
- self.stop()
-
- def run_bg(self, user, command, *args):
- """
- Run a Kafka command as the `kafka` user in the background.
-
- :param str command: Command to run
- :param list args: Additional args to pass to the command
- """
- parts = [command] + list(args)
- quoted = ' '.join("'%s'" % p for p in parts)
- e = utils.read_etc_env()
- Popen(['su', user, '-c', quoted], env=e)
-
- def restart(self):
- self.stop()
- self.start()
-
- def start(self):
- host.service_start('kafka')
-
- def stop(self):
- host.service_stop('kafka')
-
- def cleanup(self):
- self.dist_config.remove_users()
- self.dist_config.remove_dirs()
diff --git a/charms/trusty/kafka/hooks/common.py b/charms/trusty/kafka/hooks/common.py
deleted file mode 100755
index 1ca080b..0000000
--- a/charms/trusty/kafka/hooks/common.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Common implementation for all hooks.
-"""
-
-import jujuresources
-from charmhelpers.core import hookenv
-from charmhelpers.core import unitdata
-from charmhelpers.core import charmframework
-
-
-def bootstrap_resources():
- """
- Install required resources defined in resources.yaml
- """
- if unitdata.kv().get('charm.bootstrapped', False):
- return True
- hookenv.status_set('maintenance', 'Installing base resources')
- mirror_url = jujuresources.config_get('resources_mirror')
- if not jujuresources.fetch(mirror_url=mirror_url):
- missing = jujuresources.invalid()
- hookenv.status_set('blocked', 'Unable to fetch required resource%s: %s' % (
- 's' if len(missing) > 1 else '',
- ', '.join(missing),
- ))
- return False
- jujuresources.install(['pathlib', 'jujubigdata'])
- unitdata.kv().set('charm.bootstrapped', True)
- return True
-
-
-def manage():
- if not bootstrap_resources():
- # defer until resources are available, since charmhelpers, and thus
- # the framework, are required (will require manual intervention)
- return
-
- import jujubigdata
- import callbacks
-
- kafka_reqs = ['vendor', 'packages', 'groups', 'users', 'dirs', 'ports']
- dist_config = jujubigdata.utils.DistConfig(filename='dist.yaml',
- required_keys=kafka_reqs)
- kafka = callbacks.Kafka(dist_config)
- manager = charmframework.Manager([
- {
- 'name': 'kafka',
- 'provides': [
- jujubigdata.relations.Kafka(port=dist_config.port('kafka'))
- ],
- 'requires': [
- kafka.verify_resources,
- jujubigdata.relations.Zookeeper(),
- callbacks.update_blocked_status, # not really a requirement, but best way to fit into framework
- ],
- 'callbacks': [
- callbacks.update_working_status,
- kafka.install,
- kafka.configure_kafka,
- kafka.restart,
- charmframework.helpers.open_ports(
- dist_config.exposed_ports('kafka')),
- callbacks.update_active_status,
- ],
- 'cleanup': [
- callbacks.clear_active_flag,
- charmframework.helpers.close_ports(
- dist_config.exposed_ports('kafka')),
- kafka.stop,
- kafka.cleanup,
- callbacks.update_blocked_status,
- ],
- },
- ])
- manager.manage()
-
-
-if __name__ == '__main__':
- manage()
diff --git a/charms/trusty/kafka/hooks/config-changed b/charms/trusty/kafka/hooks/config-changed
deleted file mode 100755
index 1b4e92c..0000000
--- a/charms/trusty/kafka/hooks/config-changed
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/hooks/install b/charms/trusty/kafka/hooks/install
deleted file mode 100755
index 7ea6d0f..0000000
--- a/charms/trusty/kafka/hooks/install
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import setup
-setup.pre_install()
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/hooks/kafka-relation-changed b/charms/trusty/kafka/hooks/kafka-relation-changed
deleted file mode 100755
index 1b4e92c..0000000
--- a/charms/trusty/kafka/hooks/kafka-relation-changed
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/hooks/setup.py b/charms/trusty/kafka/hooks/setup.py
deleted file mode 100644
index 496edc6..0000000
--- a/charms/trusty/kafka/hooks/setup.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import subprocess
-from glob import glob
-
-
-def pre_install():
- """
- Do any setup required before the install hook.
- """
- install_pip()
- install_bundled_resources()
-
-
-def install_pip():
- subprocess.check_call(['apt-get', 'install', '-yq', 'python-pip', 'bzr'])
-
-
-def install_bundled_resources():
- """
- Install the bundled resources libraries.
- """
- archives = glob('resources/python/*')
- subprocess.check_call(['pip', 'install'] + archives)
diff --git a/charms/trusty/kafka/hooks/start b/charms/trusty/kafka/hooks/start
deleted file mode 100755
index 1b4e92c..0000000
--- a/charms/trusty/kafka/hooks/start
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/hooks/stop b/charms/trusty/kafka/hooks/stop
deleted file mode 100755
index 1b4e92c..0000000
--- a/charms/trusty/kafka/hooks/stop
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/hooks/zookeeper-relation-changed b/charms/trusty/kafka/hooks/zookeeper-relation-changed
deleted file mode 100755
index 1b4e92c..0000000
--- a/charms/trusty/kafka/hooks/zookeeper-relation-changed
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/hooks/zookeeper-relation-departed b/charms/trusty/kafka/hooks/zookeeper-relation-departed
deleted file mode 100755
index 1b4e92c..0000000
--- a/charms/trusty/kafka/hooks/zookeeper-relation-departed
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import common
-common.manage()
diff --git a/charms/trusty/kafka/icon.svg b/charms/trusty/kafka/icon.svg
deleted file mode 100644
index 1564f99..0000000
--- a/charms/trusty/kafka/icon.svg
+++ /dev/null
@@ -1,90 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="75pt"
- height="117pt"
- viewBox="0 0 75 117"
- version="1.1"
- id="svg3201"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="icon.svg">
- <metadata
- id="metadata3221">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs3219" />
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1236"
- inkscape:window-height="847"
- id="namedview3217"
- showgrid="false"
- inkscape:zoom="4.5641627"
- inkscape:cx="35.369926"
- inkscape:cy="80.60281"
- inkscape:window-x="397"
- inkscape:window-y="78"
- inkscape:window-maximized="0"
- inkscape:current-layer="layer1" />
- <path
- d="m 0,0.04322 76.8,0 0,76.8 -76.8,0 0,-76.8 z"
- id="path3203"
- style="opacity:0;fill:#fffffe;fill-opacity:0.94977172"
- inkscape:connector-curvature="0" />
- <path
- d="M 13.76256,3.6403482 C 20.992,-0.05524154 33.024,1.9665021 36.42368,7.4934765 40.37632,12.889169 34.816,19.735528 25.77408,20.917066 c 0,1.732924 0.01024,3.459282 0,5.185642 4.6592,0.708923 8.87808,2.258051 12.05248,4.56205 2.21184,-0.892717 4.526079,-1.700102 6.72768,-2.605948 -0.59392,-3.209847 -0.59392,-6.833231 3.10272,-9.340718 5.77536,-4.719589 18.51392,-4.476718 23.72608,0.531692 4.751359,4.076308 3.266559,10.450051 -3.21536,13.403898 -6.03136,3.012923 -15.28832,2.271179 -20.41856,-1.266872 -2.16064,0.833641 -4.43392,1.549128 -6.52288,2.454975 -0.307201,1.122461 0.45056,2.251486 0.6144,3.373948 0.70656,1.956103 -0.78848,3.859693 -0.68608,5.809231 2.11968,0.951795 4.47488,1.68041 6.79936,2.415589 6.36928,-4.653948 19.445759,-3.918768 24.10496,1.575386 5.12,5.087179 0.62464,12.668717 -8.52992,14.211281 -6.85056,1.273436 -14.7968,-0.794256 -17.84832,-4.988717 -1.81248,-2.23836 -1.69984,-4.857437 -0.8704,-7.259898 -2.29376,-0.951795 -4.67968,-1.805128 -6.97344,-2.756923 -3.16416,2.284307 -7.34208,3.885949 -12.01152,4.516102 -0.06144,1.746051 -0.04096,3.498667 -0.0512,5.251283 4.78208,0.728615 8.98048,2.921025 10.81344,5.868307 2.75456,4.299487 0.07168,9.872411 -6.51264,12.025436 -6.2464,2.422154 -15.06304,1.234052 -19.33312,-2.546872 -4.89472,-3.846564 -3.84,-10.095589 2.17088,-13.213539 1.91488,-1.102768 4.41344,-1.634461 6.78912,-2.19241 0.03072,-1.68041 0.04096,-3.367384 0.07168,-5.047794 C 14.98112,50.009169 10.21952,48.597887 7.26016,45.919734 1.44384,41.180451 2.2528,33.671118 9.20576,29.581682 c 2.78528,-1.93641 6.77888,-2.776616 10.567679,-3.577436 -0.03072,-1.68041 -0.04096,-3.360821 -0.07168,-5.034667 C 15.59552,20.181887 11.60192,18.678708 9.5641599,16.164655 5.82656,12.081784 7.69024,6.3644508 13.76256,3.6403482 z"
- id="path3205"
- inkscape:connector-curvature="0"
- style="fill:#201f1f" />
- <path
- d="M 18.95424,7.4869124 C 23.58272,5.3338867 30.53568,8.2614765 29.85984,11.799528 29.48096,14.924041 23.5008,17.182092 19.2,15.462297 14.42816,13.926297 14.27456,9.1410662 18.95424,7.4869124 z"
- id="path3207"
- inkscape:connector-curvature="0"
- style="fill:#fffffe" />
- <path
- d="m 55.76704,20.844861 c 4.51584,-1.673846 10.91584,0.761436 10.56768,4.135384 0.235519,3.649642 -7.33184,5.96677 -11.64288,3.557744 -4.106241,-1.897025 -3.38944,-6.255589 1.0752,-7.693128 z"
- id="path3209"
- inkscape:connector-curvature="0"
- style="fill:#fffffe" />
- <path
- d="m 18.78016,32.305784 c 7.311359,-2.303999 16.35328,2.829129 13.8752,7.75877 -1.44384,4.555487 -11.17184,6.721641 -16.5376,3.472409 -6.05184,-2.894768 -4.352,-9.570461 2.6624,-11.231179 z"
- id="path3211"
- inkscape:connector-curvature="0"
- style="fill:#fffffe" />
- <path
- d="m 54.69184,48.348451 c 4.66944,-2.619077 12.759039,0.347897 11.601919,4.253538 -0.409599,3.629949 -8.345599,5.218462 -12.103679,2.651898 -3.2256,-1.772308 -2.8672,-5.303795 0.50176,-6.905436 z"
- id="path3213"
- inkscape:connector-curvature="0"
- style="fill:#fffffe" />
- <path
- d="m 20.67456,61.030297 c 5.10976,-1.13559 10.78272,2.566565 8.82688,5.809231 -1.269761,3.190153 -8.48896,4.483282 -11.84768,1.857641 -4.06528,-2.19241 -2.03776,-6.872615 3.0208,-7.666872 z"
- id="path3215"
- inkscape:connector-curvature="0"
- style="fill:#fffffe" />
- <g
- inkscape:groupmode="layer"
- id="layer1"
- inkscape:label="Alpha"
- style="opacity:1" />
-</svg>
diff --git a/charms/trusty/kafka/metadata.yaml b/charms/trusty/kafka/metadata.yaml
deleted file mode 100644
index 735c20e..0000000
--- a/charms/trusty/kafka/metadata.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-name: apache-kafka
-maintainer: Kevin Monroe <kevin.monroe@canonical.com>
-summary: High-performance pub/sub as distributed commit log.
-description: |
- Fast
- A single Kafka broker can handle hundreds of megabytes of reads and writes per
- second from thousands of clients.
-
- Scalable
- Kafka is designed to allow a single cluster to serve as the central data
- backbone for a large organization. It can be elastically and transparently
- expanded without downtime. Data streams are partitioned and spread over a
- cluster of machines to allow data streams larger than the capability of any
- single machine and to allow clusters of co-ordinated consumers.
-
- Durable
- Messages are persisted on disk and replicated within the cluster to prevent
- data loss. Each broker can handle terabytes of messages without performance
- impact.
-
- Distributed by Design
- Kafka has a modern cluster-centric design that offers strong durability and
- fault-tolerance guarantees.
-tags: ["bigdata", "apache"]
-provides:
- kafka:
- interface: kafka
-requires:
- zookeeper:
- interface: zookeeper
diff --git a/charms/trusty/kafka/resources.yaml b/charms/trusty/kafka/resources.yaml
deleted file mode 100644
index a3b5c74..0000000
--- a/charms/trusty/kafka/resources.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-options:
- output_dir: /home/ubuntu/resources
-resources:
- pathlib:
- pypi: path.py>=7.0
- jujubigdata:
- pypi: jujubigdata>=4.1.0,<5.0.0
-optional_resources:
- kafka-x86_64:
- url: http://mirrors.ibiblio.org/apache/kafka/0.9.0.0/kafka_2.10-0.9.0.0.tgz
- hash: 4bd0a264b84e8d88445b2712dd9e28ac
- hash_type: md5
diff --git a/charms/trusty/kafka/resources/python/PyYAML-3.11.tar.gz b/charms/trusty/kafka/resources/python/PyYAML-3.11.tar.gz
deleted file mode 100644
index 2a5d431..0000000
--- a/charms/trusty/kafka/resources/python/PyYAML-3.11.tar.gz
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/kafka/resources/python/charmhelpers-0.3.1.tar.gz b/charms/trusty/kafka/resources/python/charmhelpers-0.3.1.tar.gz
deleted file mode 100644
index 0ccdbd9..0000000
--- a/charms/trusty/kafka/resources/python/charmhelpers-0.3.1.tar.gz
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/kafka/resources/python/jujuresources-0.2.11.tar.gz b/charms/trusty/kafka/resources/python/jujuresources-0.2.11.tar.gz
deleted file mode 100644
index c491086..0000000
--- a/charms/trusty/kafka/resources/python/jujuresources-0.2.11.tar.gz
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/kafka/resources/python/pyaml-15.5.7.tar.gz b/charms/trusty/kafka/resources/python/pyaml-15.5.7.tar.gz
deleted file mode 100644
index c51f6d1..0000000
--- a/charms/trusty/kafka/resources/python/pyaml-15.5.7.tar.gz
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/kafka/resources/python/six-1.9.0-py2.py3-none-any.whl b/charms/trusty/kafka/resources/python/six-1.9.0-py2.py3-none-any.whl
deleted file mode 100644
index 743ee12..0000000
--- a/charms/trusty/kafka/resources/python/six-1.9.0-py2.py3-none-any.whl
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/kafka/templates/upstart.conf b/charms/trusty/kafka/templates/upstart.conf
deleted file mode 100644
index d24fe8c..0000000
--- a/charms/trusty/kafka/templates/upstart.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-#!upstart
-description "kafka"
-
-start on startup
-stop on shutdown
-
-setuid kafka
-setgid hadoop
-
-respawn
-
-script
-"{{kafka_bin}}/kafka-server-start.sh" "{{kafka_conf}}/server.properties"
-end script
diff --git a/charms/trusty/kafka/tests/00-setup b/charms/trusty/kafka/tests/00-setup
deleted file mode 100755
index 36549ea..0000000
--- a/charms/trusty/kafka/tests/00-setup
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-sudo add-apt-repository ppa:juju/stable -y
-sudo apt-get update
-sudo apt-get install python3 amulet -y
diff --git a/charms/trusty/kafka/tests/100-deploy-kafka b/charms/trusty/kafka/tests/100-deploy-kafka
deleted file mode 100755
index 713a4b4..0000000
--- a/charms/trusty/kafka/tests/100-deploy-kafka
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python3
-import unittest
-import amulet
-
-
-class TestDeploy(unittest.TestCase):
- """
- Deployment test for Apache Kafka
- """
-
- @classmethod
- def setUpClass(cls):
- cls.d = amulet.Deployment(series='trusty')
- # Deploy Kafka Service
- cls.d.add('kafka', charm='cs:~bigdata-dev/trusty/apache-kafka')
- cls.d.add('zookeeper', charm='cs:~bigdata-dev/trusty/apache-zookeeper')
- cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper')
-
- cls.d.setup(timeout=1800)
- cls.d.sentry.wait(timeout=1800)
- cls.unit = cls.d.sentry['kafka'][0]
-
- def test_deploy(self):
- output, retcode = self.unit.run("pgrep -a java")
- assert 'Kafka' in output, "Kafka daemon is not started"
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/charms/trusty/kafka/tests/remote/test_dist_config.py b/charms/trusty/kafka/tests/remote/test_dist_config.py
deleted file mode 100755
index eb2c3aa..0000000
--- a/charms/trusty/kafka/tests/remote/test_dist_config.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-
-import grp
-import os
-import pwd
-import unittest
-
-import jujubigdata
-
-
-class TestDistConfig(unittest.TestCase):
- """
- Test that the ``dist.yaml`` settings were applied properly, such as users, groups, and dirs.
-
- This is done as a remote test on the deployed unit rather than a regular
- test under ``tests/`` because filling in the ``dist.yaml`` requires Juju
- context (e.g., config).
- """
- @classmethod
- def setUpClass(cls):
- config = None
- config_dir = os.environ['JUJU_CHARM_DIR']
- config_file = 'dist.yaml'
- if os.path.isfile(os.path.join(config_dir, config_file)):
- config = os.path.join(config_dir, config_file)
- if not config:
- raise IOError('Could not find {} in {}'.format(config_file, config_dir))
- reqs = ['vendor', 'hadoop_version', 'groups', 'users', 'dirs']
- cls.dist_config = jujubigdata.utils.DistConfig(config, reqs)
-
- def test_groups(self):
- for name in self.dist_config.groups:
- try:
- grp.getgrnam(name)
- except KeyError:
- self.fail('Group {} is missing'.format(name))
-
- def test_users(self):
- for username, details in self.dist_config.users.items():
- try:
- user = pwd.getpwnam(username)
- except KeyError:
- self.fail('User {} is missing'.format(username))
- for groupname in details['groups']:
- try:
- group = grp.getgrnam(groupname)
- except KeyError:
- self.fail('Group {} referenced by user {} does not exist'.format(
- groupname, username))
- if group.gr_gid != user.pw_gid:
- self.assertIn(username, group.gr_mem, 'User {} not in group {}'.format(
- username, groupname))
-
- def test_dirs(self):
- for name, details in self.dist_config.dirs.items():
- dirpath = self.dist_config.path(name)
- self.assertTrue(dirpath.isdir(), 'Dir {} is missing'.format(name))
- stat = dirpath.stat()
- owner = pwd.getpwuid(stat.st_uid).pw_name
- group = grp.getgrgid(stat.st_gid).gr_name
- perms = stat.st_mode & ~0o40000
- self.assertEqual(owner, details.get('owner', 'root'),
- 'Dir {} ({}) has wrong owner: {}'.format(name, dirpath, owner))
- self.assertEqual(group, details.get('group', 'root'),
- 'Dir {} ({}) has wrong group: {}'.format(name, dirpath, group))
- self.assertEqual(perms, details.get('perms', 0o755),
- 'Dir {} ({}) has wrong perms: 0o{:o}'.format(name, dirpath, perms))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/charms/trusty/kafka/tests/tests.yaml b/charms/trusty/kafka/tests/tests.yaml
deleted file mode 100644
index 771f3fd..0000000
--- a/charms/trusty/kafka/tests/tests.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-# Driver for bundletester: https://github.com/juju-solutions/bundletester
-#
-# It may be useful to alter the defaults during manual testing. For example,
-# set 'reset: false' to reuse existing charms instead of redeploying them.
-
-# Allow bootstrap of current env, default: true
-bootstrap: true
-
-# Use juju-deployer to reset env between test, default: true
-reset: true
diff --git a/charms/trusty/keepalived/.project b/charms/trusty/keepalived/.project
deleted file mode 100644
index 21d3bc6..0000000
--- a/charms/trusty/keepalived/.project
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
- <name>keepalived</name>
- <comment></comment>
- <projects>
- </projects>
- <buildSpec>
- <buildCommand>
- <name>org.python.pydev.PyDevBuilder</name>
- <arguments>
- </arguments>
- </buildCommand>
- </buildSpec>
- <natures>
- <nature>org.python.pydev.pythonNature</nature>
- </natures>
-</projectDescription>
diff --git a/charms/trusty/keepalived/.pydevproject b/charms/trusty/keepalived/.pydevproject
deleted file mode 100644
index 509d596..0000000
--- a/charms/trusty/keepalived/.pydevproject
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<?eclipse-pydev version="1.0"?><pydev_project>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
-<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
-<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
-<path>/${PROJECT_DIR_NAME}/hooks</path>
-</pydev_pathproperty>
-</pydev_project>
diff --git a/charms/trusty/keepalived/README.md b/charms/trusty/keepalived/README.md
deleted file mode 100644
index baf74fa..0000000
--- a/charms/trusty/keepalived/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-Overview
---------
-
-Keepalived is a routing software written in C. The main goal of this project is
-to provide simple and robust facilities for loadbalancing and high-availability
-to Linux system and Linux based infrastructures. Loadbalancing framework relies
-on well-known and widely used Linux Virtual Server (IPVS) kernel module
-providing Layer4 loadbalancing. Keepalived implements a set of checkers to
-dynamically and adaptively maintain and manage loadbalanced server pool
-according their health. On the other hand high-availability is achieved by VRRP
-protocol. VRRP is a fundamental brick for router failover. In addition,
-Keepalived implements a set of hooks to the VRRP finite state machine providing
-low-level and high-speed protocol interactions. Keepalived frameworks can be
-used independently or all together to provide resilient infrastructures.
-
-Usage
------
-
-Once ready, deploy as follows:
-
- juju deploy keepalived
- juju add-relation haproxy keepalived
-
diff --git a/charms/trusty/keepalived/bin/charm_helpers_sync.py b/charms/trusty/keepalived/bin/charm_helpers_sync.py
deleted file mode 100644
index f67fdb9..0000000
--- a/charms/trusty/keepalived/bin/charm_helpers_sync.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Authors:
-# Adam Gandelman <adamg@ubuntu.com>
-
-import logging
-import optparse
-import os
-import subprocess
-import shutil
-import sys
-import tempfile
-import yaml
-from fnmatch import fnmatch
-
-import six
-
-CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
-
-
-def parse_config(conf_file):
- if not os.path.isfile(conf_file):
- logging.error('Invalid config file: %s.' % conf_file)
- return False
- return yaml.load(open(conf_file).read())
-
-
-def clone_helpers(work_dir, branch):
- dest = os.path.join(work_dir, 'charm-helpers')
- logging.info('Checking out %s to %s.' % (branch, dest))
- cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
- subprocess.check_call(cmd)
- return dest
-
-
-def _module_path(module):
- return os.path.join(*module.split('.'))
-
-
-def _src_path(src, module):
- return os.path.join(src, 'charmhelpers', _module_path(module))
-
-
-def _dest_path(dest, module):
- return os.path.join(dest, _module_path(module))
-
-
-def _is_pyfile(path):
- return os.path.isfile(path + '.py')
-
-
-def ensure_init(path):
- '''
- ensure directories leading up to path are importable, omitting
- parent directory, eg path='/hooks/helpers/foo'/:
- hooks/
- hooks/helpers/__init__.py
- hooks/helpers/foo/__init__.py
- '''
- for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
- _i = os.path.join(d, '__init__.py')
- if not os.path.exists(_i):
- logging.info('Adding missing __init__.py: %s' % _i)
- open(_i, 'wb').close()
-
-
-def sync_pyfile(src, dest):
- src = src + '.py'
- src_dir = os.path.dirname(src)
- logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
- if not os.path.exists(dest):
- os.makedirs(dest)
- shutil.copy(src, dest)
- if os.path.isfile(os.path.join(src_dir, '__init__.py')):
- shutil.copy(os.path.join(src_dir, '__init__.py'),
- dest)
- ensure_init(dest)
-
-
-def get_filter(opts=None):
- opts = opts or []
- if 'inc=*' in opts:
- # do not filter any files, include everything
- return None
-
- def _filter(dir, ls):
- incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
- _filter = []
- for f in ls:
- _f = os.path.join(dir, f)
-
- if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
- if True not in [fnmatch(_f, inc) for inc in incs]:
- logging.debug('Not syncing %s, does not match include '
- 'filters (%s)' % (_f, incs))
- _filter.append(f)
- else:
- logging.debug('Including file, which matches include '
- 'filters (%s): %s' % (incs, _f))
- elif (os.path.isfile(_f) and not _f.endswith('.py')):
- logging.debug('Not syncing file: %s' % f)
- _filter.append(f)
- elif (os.path.isdir(_f) and not
- os.path.isfile(os.path.join(_f, '__init__.py'))):
- logging.debug('Not syncing directory: %s' % f)
- _filter.append(f)
- return _filter
- return _filter
-
-
-def sync_directory(src, dest, opts=None):
- if os.path.exists(dest):
- logging.debug('Removing existing directory: %s' % dest)
- shutil.rmtree(dest)
- logging.info('Syncing directory: %s -> %s.' % (src, dest))
-
- shutil.copytree(src, dest, ignore=get_filter(opts))
- ensure_init(dest)
-
-
-def sync(src, dest, module, opts=None):
-
- # Sync charmhelpers/__init__.py for bootstrap code.
- sync_pyfile(_src_path(src, '__init__'), dest)
-
- # Sync other __init__.py files in the path leading to module.
- m = []
- steps = module.split('.')[:-1]
- while steps:
- m.append(steps.pop(0))
- init = '.'.join(m + ['__init__'])
- sync_pyfile(_src_path(src, init),
- os.path.dirname(_dest_path(dest, init)))
-
- # Sync the module, or maybe a .py file.
- if os.path.isdir(_src_path(src, module)):
- sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
- elif _is_pyfile(_src_path(src, module)):
- sync_pyfile(_src_path(src, module),
- os.path.dirname(_dest_path(dest, module)))
- else:
- logging.warn('Could not sync: %s. Neither a pyfile or directory, '
- 'does it even exist?' % module)
-
-
-def parse_sync_options(options):
- if not options:
- return []
- return options.split(',')
-
-
-def extract_options(inc, global_options=None):
- global_options = global_options or []
- if global_options and isinstance(global_options, six.string_types):
- global_options = [global_options]
- if '|' not in inc:
- return (inc, global_options)
- inc, opts = inc.split('|')
- return (inc, parse_sync_options(opts) + global_options)
-
-
-def sync_helpers(include, src, dest, options=None):
- if not os.path.isdir(dest):
- os.makedirs(dest)
-
- global_options = parse_sync_options(options)
-
- for inc in include:
- if isinstance(inc, str):
- inc, opts = extract_options(inc, global_options)
- sync(src, dest, inc, opts)
- elif isinstance(inc, dict):
- # could also do nested dicts here.
- for k, v in six.iteritems(inc):
- if isinstance(v, list):
- for m in v:
- inc, opts = extract_options(m, global_options)
- sync(src, dest, '%s.%s' % (k, inc), opts)
-
-if __name__ == '__main__':
- parser = optparse.OptionParser()
- parser.add_option('-c', '--config', action='store', dest='config',
- default=None, help='helper config file')
- parser.add_option('-D', '--debug', action='store_true', dest='debug',
- default=False, help='debug')
- parser.add_option('-b', '--branch', action='store', dest='branch',
- help='charm-helpers bzr branch (overrides config)')
- parser.add_option('-d', '--destination', action='store', dest='dest_dir',
- help='sync destination dir (overrides config)')
- (opts, args) = parser.parse_args()
-
- if opts.debug:
- logging.basicConfig(level=logging.DEBUG)
- else:
- logging.basicConfig(level=logging.INFO)
-
- if opts.config:
- logging.info('Loading charm helper config from %s.' % opts.config)
- config = parse_config(opts.config)
- if not config:
- logging.error('Could not parse config from %s.' % opts.config)
- sys.exit(1)
- else:
- config = {}
-
- if 'branch' not in config:
- config['branch'] = CHARM_HELPERS_BRANCH
- if opts.branch:
- config['branch'] = opts.branch
- if opts.dest_dir:
- config['destination'] = opts.dest_dir
-
- if 'destination' not in config:
- logging.error('No destination dir. specified as option or config.')
- sys.exit(1)
-
- if 'include' not in config:
- if not args:
- logging.error('No modules to sync specified as option or config.')
- sys.exit(1)
- config['include'] = []
- [config['include'].append(a) for a in args]
-
- sync_options = None
- if 'options' in config:
- sync_options = config['options']
- tmpd = tempfile.mkdtemp()
- try:
- checkout = clone_helpers(tmpd, config['branch'])
- sync_helpers(config['include'], checkout, config['destination'],
- options=sync_options)
- except Exception as e:
- logging.error("Could not sync: %s" % e)
- raise e
- finally:
- logging.debug('Cleaning up %s' % tmpd)
- shutil.rmtree(tmpd)
diff --git a/charms/trusty/keepalived/charm-helpers-sync.yaml b/charms/trusty/keepalived/charm-helpers-sync.yaml
deleted file mode 100644
index 0af5672..0000000
--- a/charms/trusty/keepalived/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
diff --git a/charms/trusty/keepalived/config.yaml b/charms/trusty/keepalived/config.yaml
deleted file mode 100644
index 21f3827..0000000
--- a/charms/trusty/keepalived/config.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-options:
- virtual-ip:
- type: string
- description: |
- Virtual IP/netmask that will be moved between instances,
- e.g.: 10.1.2.3/16
- router-id:
- type: int
- description: |
- Virtual router identifier - a number between 1 and 255
- that's unique within the network segment \ No newline at end of file
diff --git a/charms/trusty/keepalived/copyright b/charms/trusty/keepalived/copyright
deleted file mode 100644
index 567db82..0000000
--- a/charms/trusty/keepalived/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/keepalived/hooks/actions.py b/charms/trusty/keepalived/hooks/actions.py
deleted file mode 100644
index 90440ec..0000000
--- a/charms/trusty/keepalived/hooks/actions.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from charmhelpers.core import hookenv
-
-
-def log_start(service_name):
- hookenv.log('keepalived starting')
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/__init__.py b/charms/trusty/keepalived/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/__init__.py b/charms/trusty/keepalived/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/decorators.py b/charms/trusty/keepalived/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/fstab.py b/charms/trusty/keepalived/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/hookenv.py b/charms/trusty/keepalived/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index 117429f..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,744 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-from functools import wraps
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-def relation_id():
- """The relation ID for the current relation hook"""
- return os.environ.get('JUJU_RELATION_ID', None)
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.path.basename(sys.argv[0])
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
-
- def __getitem__(self, key):
- """For regular dict lookups, check the current juju config first,
- then the previous (saved) copy. This ensures that user-saved values
- will be returned by a dict lookup.
-
- """
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- return (self._prev_dict or {})[key]
-
- def get(self, key, default=None):
- try:
- return self[key]
- except KeyError:
- return default
-
- def keys(self):
- prev_keys = []
- if self._prev_dict is not None:
- prev_keys = self._prev_dict.keys()
- return list(set(prev_keys + list(dict.keys(self))))
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- if self._prev_dict:
- for k, v in six.iteritems(self._prev_dict):
- if k not in self:
- self[k] = v
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=True):
- super(Hooks, self).__init__()
- self._hooks = {}
- self._config_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- self._hooks[hook_name]()
- if self._config_save:
- cfg = config()
- if cfg.implicit_save:
- cfg.save()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state
-
- If the status-set command is not found then assume this is juju < 1.23 and
- return 'unknown'
- """
- cmd = ['status-get']
- try:
- raw_status = subprocess.check_output(cmd, universal_newlines=True)
- status = raw_status.rstrip()
- return status
- except OSError as e:
- if e.errno == errno.ENOENT:
- return 'unknown'
- else:
- raise
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.iteritems():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/host.py b/charms/trusty/keepalived/hooks/charmhelpers/core/host.py
deleted file mode 100644
index 901a4cf..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = [
- 'gpasswd', '-a',
- username,
- group
- ]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def list_nics(nic_type):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
- interfaces = []
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- interface = matched.groups()[0]
- else:
- interface = line.split()[1].replace(":", "")
- interfaces.append(interface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/keepalived/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/services/base.py b/charms/trusty/keepalived/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index 98d344e..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- cfg = hookenv.config()
- if cfg.implicit_save:
- cfg.save()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/keepalived/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3eb5fb4..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-from charmhelpers.core import hookenv
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
-
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/strutils.py b/charms/trusty/keepalived/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index a2a784a..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/sysctl.py b/charms/trusty/keepalived/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/templating.py b/charms/trusty/keepalived/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/core/unitdata.py b/charms/trusty/keepalived/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 406a35c..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,477 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are automatically committed at hook exit. That's
- currently regardless of exit code.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def _scoped_query(self, stmt, params=None):
- if params is None:
- params = []
- return stmt, params
-
- def get(self, key, default=None, record=False):
- self.cursor.execute(
- *self._scoped_query(
- 'select data from kv where key=?', [key]))
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- stmt = "select key, data from kv where key like '%s%%'" % key_prefix
- self.cursor.execute(*self._scoped_query(stmt))
- result = self.cursor.fetchall()
-
- if not result:
- return None
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- serialized = json.dumps(value)
-
- self.cursor.execute(
- 'select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/keepalived/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 9a1a251..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,439 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_hold(packages, fatal=False):
- """Hold one or more packages"""
- cmd = ['apt-mark', 'hold']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd)
- else:
- subprocess.call(cmd)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource:
- pass
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/keepalived/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index 8dfce50..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- check_hash(dld_file, value, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/keepalived/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/keepalived/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/keepalived/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index ddc25b7..0000000
--- a/charms/trusty/keepalived/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e.message)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/keepalived/hooks/config-changed b/charms/trusty/keepalived/hooks/config-changed
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/keepalived/hooks/config-changed
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/keepalived/hooks/install b/charms/trusty/keepalived/hooks/install
deleted file mode 100755
index 7004e0a..0000000
--- a/charms/trusty/keepalived/hooks/install
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/python
-
-import setup
-setup.pre_install()
-
-from charmhelpers.core import hookenv
-from charmhelpers.fetch import apt_upgrade, apt_install
-
-
-def install():
- hookenv.log('Installing keepalived')
-
- apt_upgrade(fatal=True, dist=True)
- apt_install('keepalived', fatal=True)
-
-
-if __name__ == "__main__":
- install()
diff --git a/charms/trusty/keepalived/hooks/juju_info-relation-joined b/charms/trusty/keepalived/hooks/juju_info-relation-joined
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/keepalived/hooks/juju_info-relation-joined
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/keepalived/hooks/leader-elected b/charms/trusty/keepalived/hooks/leader-elected
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/keepalived/hooks/leader-elected
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/keepalived/hooks/services.py b/charms/trusty/keepalived/hooks/services.py
deleted file mode 100644
index bc456aa..0000000
--- a/charms/trusty/keepalived/hooks/services.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/python
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-from charmhelpers.core.services.base import ServiceManager
-from charmhelpers.core.services import helpers
-
-import actions
-
-SYSCTL_FILE = os.path.join(os.sep, 'etc', 'sysctl.d', '50-keepalived.conf')
-KEEPALIVED_CONFIG_FILE = os.path.join(os.sep, 'etc', 'keepalived',
- 'keepalived.conf')
-config = hookenv.config()
-
-
-def manage():
- manager = ServiceManager([
- {
- 'service': 'keepalived',
- 'required_data': [
- helpers.RequiredConfig('virtual-ip',
- 'router-id'),
- {'is_leader': hookenv.is_leader()}
- ],
- 'data_ready': [
- actions.log_start,
- helpers.template(
- source='keepalived.conf',
- target=KEEPALIVED_CONFIG_FILE,
- perms=0o644
- )
- ],
- # keepalived has no "status" command
- 'stop': [
- lambda arg: host.service_stop('keepalived')
- ],
- 'start': [
- lambda arg: host.service_restart('keepalived')
- ],
- },
- {
- 'service': 'procps',
- 'required_data': [
- {'sysctl': {'net.ipv4.ip_nonlocal_bind': 1}},
- ],
- 'data_ready': [
- helpers.template(
- source='50-keepalived.conf',
- target=SYSCTL_FILE,
- perms=0o644
- )
- ],
- }
- ])
- manager.manage()
diff --git a/charms/trusty/keepalived/hooks/setup.py b/charms/trusty/keepalived/hooks/setup.py
deleted file mode 100644
index 50854d9..0000000
--- a/charms/trusty/keepalived/hooks/setup.py
+++ /dev/null
@@ -1,17 +0,0 @@
-def pre_install():
- """
- Do any setup required before the install hook.
- """
- install_charmhelpers()
-
-
-def install_charmhelpers():
- """
- Install the charmhelpers library, if not present.
- """
- try:
- import charmhelpers # noqa
- except ImportError:
- import subprocess
- subprocess.check_call(['apt-get', 'install', '-y', 'python-pip'])
- subprocess.check_call(['pip', 'install', 'charmhelpers'])
diff --git a/charms/trusty/keepalived/hooks/start b/charms/trusty/keepalived/hooks/start
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/keepalived/hooks/start
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/keepalived/hooks/stop b/charms/trusty/keepalived/hooks/stop
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/keepalived/hooks/stop
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/keepalived/hooks/upgrade-charm b/charms/trusty/keepalived/hooks/upgrade-charm
deleted file mode 100755
index 5028988..0000000
--- a/charms/trusty/keepalived/hooks/upgrade-charm
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/python
-import services
-services.manage()
diff --git a/charms/trusty/keepalived/icon.svg b/charms/trusty/keepalived/icon.svg
deleted file mode 100644
index 7b82b7a..0000000
--- a/charms/trusty/keepalived/icon.svg
+++ /dev/null
@@ -1,293 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg"
- viewBox="0 0 96 96">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- <feBlend
- blend="normal"
- id="feBlend3895"
- in2="composite2" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="8.1490724"
- inkscape:cx="49.021381"
- inkscape:cy="46.975739"
- inkscape:document-units="px"
- inkscape:current-layer="layer3"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1056"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title />
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#333333;fill-opacity:0.93333334000000001;stroke:none;display:inline;filter:url(#filter1121);opacity:0.7"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACEHOLDER LETTER"
- style="display:inline">
- <text
- xml:space="preserve"
- style="font-style:normal;font-weight:normal;font-size:40px;line-height:125%;font-family:Sans;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;filter:url(#filter950)"
- x="17.458124"
- y="69.1772"
- id="text3891"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan3893"
- x="17.458124"
- y="69.1772"
- style="font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;font-size:56px;font-family:Ubuntu;-inkscape-font-specification:'Ubuntu Medium';fill:#ffffff;fill-opacity:1">ka</tspan></text>
- <rect
- style="opacity:0.7;fill:none;stroke:none"
- id="rect3021"
- width="64"
- height="64"
- x="16"
- y="15.449201" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/keepalived/metadata.yaml b/charms/trusty/keepalived/metadata.yaml
deleted file mode 100644
index 88420e2..0000000
--- a/charms/trusty/keepalived/metadata.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: keepalived
-summary: Failover and monitoring daemon for LVS clusters
-maintainer: Michał Sawicz <michal.sawicz@canonical.com>
-description: |
- keepalived is used for monitoring real servers within a Linux Virtual
- Server (LVS) cluster. keepalived can be configured to remove real
- servers from the cluster pool if it stops responding, as well as send
- a notification email to make the admin aware of the service failure.
- In addition, keepalived implements an independent Virtual Router
- Redundancy Protocol (VRRPv2; see rfc2338 for additional info)
- framework for director failover.
-tags:
- - cache-proxy
-subordinate: true
-requires:
- juju_info:
- interface: juju-info
- scope: container \ No newline at end of file
diff --git a/charms/trusty/keepalived/templates/50-keepalived.conf b/charms/trusty/keepalived/templates/50-keepalived.conf
deleted file mode 100644
index 6b023e1..0000000
--- a/charms/trusty/keepalived/templates/50-keepalived.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for key in sysctl %}
-{{ key }}={{ sysctl[key] }}
-{% endfor %} \ No newline at end of file
diff --git a/charms/trusty/keepalived/templates/keepalived.conf b/charms/trusty/keepalived/templates/keepalived.conf
deleted file mode 100644
index 594e926..0000000
--- a/charms/trusty/keepalived/templates/keepalived.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-vrrp_instance VI_1 {
- interface eth0
- state {% if is_leader %}MASTER{% else %}BACKUP{% endif %}
- priority {% if is_leader %}101{% else %}100{% endif %}
- virtual_router_id {{ config['router-id'] }}
-
- virtual_ipaddress {
- {{ config['virtual-ip'] }}
- }
-} \ No newline at end of file
diff --git a/charms/trusty/keepalived/tests/00-setup b/charms/trusty/keepalived/tests/00-setup
deleted file mode 100755
index f0616a5..0000000
--- a/charms/trusty/keepalived/tests/00-setup
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-sudo add-apt-repository ppa:juju/stable -y
-sudo apt-get update
-sudo apt-get install amulet python-requests -y
diff --git a/charms/trusty/keepalived/tests/10-deploy b/charms/trusty/keepalived/tests/10-deploy
deleted file mode 100755
index 1dfaa80..0000000
--- a/charms/trusty/keepalived/tests/10-deploy
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python3
-
-import amulet
-import requests
-import unittest
-
-
-class TestDeployment(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- cls.deployment = amulet.Deployment()
-
- cls.deployment.add('keepalived')
- cls.deployment.expose('keepalived')
-
- try:
- cls.deployment.setup(timeout=900)
- cls.deployment.sentry.wait()
- except amulet.helpers.TimeoutError:
- amulet.raise_status(amulet.SKIP, msg="Environment wasn't stood up in time")
- except:
- raise
- cls.unit = cls.deployment.sentry.unit['keepalived/0']
-
- def test_case(self):
- # Now you can use self.deployment.sentry.unit[UNIT] to address each of
- # the units and perform more in-depth steps. You can also reference
- # the first unit as self.unit.
- # There are three test statuses that can be triggered with
- # amulet.raise_status():
- # - amulet.PASS
- # - amulet.FAIL
- # - amulet.SKIP
- # Each unit has the following methods:
- # - .info - An array of the information of that unit from Juju
- # - .file(PATH) - Get the details of a file on that unit
- # - .file_contents(PATH) - Get plain text output of PATH file from that unit
- # - .directory(PATH) - Get details of directory
- # - .directory_contents(PATH) - List files and folders in PATH on that unit
- # - .relation(relation, service:rel) - Get relation data from return service
- # add tests here to confirm service is up and working properly
- # For example, to confirm that it has a functioning HTTP server:
- # page = requests.get('http://{}'.format(self.unit.info['public-address']))
- # page.raise_for_status()
- # More information on writing Amulet tests can be found at:
- # https://juju.ubuntu.com/docs/tools-amulet.html
- pass
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/charms/trusty/keepalived/unit_tests/test_actions.py b/charms/trusty/keepalived/unit_tests/test_actions.py
deleted file mode 100755
index 5105ab4..0000000
--- a/charms/trusty/keepalived/unit_tests/test_actions.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-import mock
-import unittest
-from pkg_resources import resource_filename
-
-# allow importing actions from the hooks directory
-sys.path.append(resource_filename(__name__, '../hooks'))
-import actions
-
-
-class TestActions(unittest.TestCase):
- @mock.patch('charmhelpers.core.hookenv.log')
- def test_log_start(self, log):
- actions.log_start('test-service')
- log.assert_called_once_with('keepalived starting')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/charms/trusty/neutron-api-contrail/.bzrignore b/charms/trusty/neutron-api-contrail/.bzrignore
deleted file mode 100644
index ba077a4..0000000
--- a/charms/trusty/neutron-api-contrail/.bzrignore
+++ /dev/null
@@ -1 +0,0 @@
-bin
diff --git a/charms/trusty/neutron-api-contrail/Makefile b/charms/trusty/neutron-api-contrail/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/neutron-api-contrail/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/neutron-api-contrail/README.md b/charms/trusty/neutron-api-contrail/README.md
deleted file mode 100644
index 481e2fa..0000000
--- a/charms/trusty/neutron-api-contrail/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This subordinate charm provides the Neutron API component which configures
-neutron-server for OpenContrail.
-Only OpenStack Icehouse or newer is supported.
-
-Usage
------
-
-Neutron API, Contrail Configuration and Keystone are prerequisite services to
-deploy.
-
-Neutron API should be deployed with legacy plugin management set to false:
-
- neutron-api:
- manage-neutron-plugin-legacy-mode: false
-
-Once ready, deploy and relate as follows:
-
- juju deploy neutron-api-contrail
- juju add-relation neutron-api neutron-api-contrail
- juju add-relation neutron-api-contrail contrail-configuration
- juju add-relation neutron-api-contrail keystone
-
-Install Sources
----------------
-
-The version of OpenContrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
diff --git a/charms/trusty/neutron-api-contrail/charm-helpers-sync.yaml b/charms/trusty/neutron-api-contrail/charm-helpers-sync.yaml
deleted file mode 100644
index 0af5672..0000000
--- a/charms/trusty/neutron-api-contrail/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
diff --git a/charms/trusty/neutron-api-contrail/config.yaml b/charms/trusty/neutron-api-contrail/config.yaml
deleted file mode 100644
index fe8f5f8..0000000
--- a/charms/trusty/neutron-api-contrail/config.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-options:
- install-sources:
- type: string
- default: |
- - "ppa:opencontrail/ppa"
- - "ppa:opencontrail/r2.20"
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
diff --git a/charms/trusty/neutron-api-contrail/copyright b/charms/trusty/neutron-api-contrail/copyright
deleted file mode 100644
index 567db82..0000000
--- a/charms/trusty/neutron-api-contrail/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/__init__.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/__init__.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/decorators.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/files.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/fstab.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hookenv.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index ab53a78..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,898 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peer'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peer``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peer'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/host.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/host.py
deleted file mode 100644
index cb3c527..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hugepage.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 4aaca3f..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/kernel.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/base.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3f67783..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/strutils.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/sysctl.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/templating.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/unitdata.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index efd7f9f..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f023b26..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/neutron-api-contrail/hooks/config-changed b/charms/trusty/neutron-api-contrail/hooks/config-changed
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-broken b/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-broken
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-changed b/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-changed
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-departed b/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-departed
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/contrail-api-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-broken b/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-broken
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-changed b/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-changed
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-departed b/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-departed
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/identity-admin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/install b/charms/trusty/neutron-api-contrail/hooks/install
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/neutron-plugin-api-subordinate-relation-joined b/charms/trusty/neutron-api-contrail/hooks/neutron-plugin-api-subordinate-relation-joined
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/neutron-plugin-api-subordinate-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_hooks.py b/charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_hooks.py
deleted file mode 100755
index 77a438a..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_hooks.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-
-import sys
-
-from apt_pkg import version_compare
-import json
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- log,
- relation_get,
- relation_set
-)
-
-from charmhelpers.core.host import (
- restart_on_change,
- service_restart
-)
-
-from charmhelpers.fetch import (
- apt_install,
- apt_upgrade,
- configure_sources
-)
-
-from neutron_api_contrail_utils import (
- CONTRAIL_VERSION,
- OPENSTACK_VERSION,
- write_plugin_config
-)
-
-PACKAGES = [ "neutron-plugin-contrail", "contrail-config-openstack" ]
-
-hooks = Hooks()
-config = config()
-
-@hooks.hook("config-changed")
-def config_changed():
- pass
-
-@hooks.hook("contrail-api-relation-changed")
-def contrail_api_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- contrail_api_relation()
-
-@hooks.hook("contrail-api-relation-departed")
-@hooks.hook("contrail-api-relation-broken")
-@restart_on_change({"/etc/neutron/plugins/opencontrail/ContrailPlugin.ini": ["neutron-server"]})
-def contrail_api_relation():
- write_plugin_config()
-
-@hooks.hook("identity-admin-relation-changed")
-def identity_admin_changed():
- if not relation_get("service_hostname"):
- log("Relation not ready")
- return
- identity_admin_relation()
-
-@hooks.hook("identity-admin-relation-departed")
-@hooks.hook("identity-admin-relation-broken")
-@restart_on_change({"/etc/neutron/plugins/opencontrail/ContrailPlugin.ini": ["neutron-server"]})
-def identity_admin_relation():
- write_plugin_config()
-
-@hooks.hook()
-def install():
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- apt_install(PACKAGES, fatal=True)
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
-
-@hooks.hook("neutron-plugin-api-subordinate-relation-joined")
-def neutron_plugin_joined():
- # create plugin config
- plugin = "neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2" \
- if version_compare(CONTRAIL_VERSION, "1.20~") >= 0 \
- else "neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_core.NeutronPluginContrailCoreV2"
- service_plugins = "neutron_plugin_contrail.plugins.opencontrail.loadbalancer.v2.plugin.LoadBalancerPluginV2" \
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0 \
- and version_compare(OPENSTACK_VERSION, "2:7.0.0") >= 0 \
- else " "
- extensions = [ "/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions" ]
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0 \
- and version_compare(OPENSTACK_VERSION, "2:7.0.0") >= 0:
- extensions.append("/usr/lib/python2.7/dist-packages/neutron_lbaas/extensions")
- conf = {
- "neutron-api": {
- "/etc/neutron/neutron.conf": {
- "sections": {
- "DEFAULT": [
- ("api_extensions_path", ":".join(extensions))
- ]
- }
- }
- }
- }
- settings = { "neutron-plugin": "contrail",
- "core-plugin": plugin,
- "neutron-plugin-config": "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini",
- "service-plugins": service_plugins,
- "quota-driver": "neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver",
- "subordinate_configuration": json.dumps(conf) }
- relation_set(relation_settings=settings)
-
-@hooks.hook("upgrade-charm")
-def upgrade_charm():
- write_plugin_config()
- service_restart("neutron-server")
-
-if __name__ == "__main__":
- main()
diff --git a/charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_utils.py b/charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_utils.py
deleted file mode 100644
index 034f7fe..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/neutron_api_contrail_utils.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from socket import gethostbyname
-from subprocess import CalledProcessError, check_output
-
-import apt_pkg
-from apt_pkg import version_compare
-
-from charmhelpers.core.hookenv import (
- related_units,
- relation_get,
- relation_ids
-)
-
-from charmhelpers.core.templating import render
-
-apt_pkg.init()
-
-def dpkg_version(pkg):
- try:
- return check_output(["dpkg-query", "-f", "${Version}\\n", "-W", pkg]).rstrip()
- except CalledProcessError:
- return None
-
-CONTRAIL_VERSION = dpkg_version("contrail-config-openstack")
-OPENSTACK_VERSION = dpkg_version("neutron-server")
-
-def contrail_api_ctx():
- ctxs = [ { "api_server": vip if vip \
- else gethostbyname(relation_get("private-address", unit, rid)),
- "api_port": port }
- for rid in relation_ids("contrail-api")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def identity_admin_ctx():
- ctxs = [ { "auth_host": gethostbyname(hostname),
- "auth_port": relation_get("service_port", unit, rid),
- "admin_user": relation_get("service_username", unit, rid),
- "admin_password": relation_get("service_password", unit, rid),
- "admin_tenant_name": relation_get("service_tenant_name", unit, rid) }
- for rid in relation_ids("identity-admin")
- for unit, hostname in
- ((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
- if hostname ]
- return ctxs[0] if ctxs else {}
-
-def write_plugin_config():
- ctx = {}
- ctx.update(contrail_api_ctx())
- ctx.update(identity_admin_ctx())
- if version_compare(OPENSTACK_VERSION, "1:2015.1~") >= 0:
- ctx["authtoken"] = True
- if version_compare(OPENSTACK_VERSION, "2:7.0.0") >= 0:
- ctx["authtoken_creds"] = True
- render("ContrailPlugin.ini",
- "/etc/neutron/plugins/opencontrail/ContrailPlugin.ini",
- ctx, "root", "neutron", 0440)
diff --git a/charms/trusty/neutron-api-contrail/hooks/start b/charms/trusty/neutron-api-contrail/hooks/start
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/stop b/charms/trusty/neutron-api-contrail/hooks/stop
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/hooks/upgrade-charm b/charms/trusty/neutron-api-contrail/hooks/upgrade-charm
deleted file mode 120000
index e70ad6e..0000000
--- a/charms/trusty/neutron-api-contrail/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-neutron_api_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-api-contrail/icon.svg b/charms/trusty/neutron-api-contrail/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/neutron-api-contrail/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/neutron-api-contrail/metadata.yaml b/charms/trusty/neutron-api-contrail/metadata.yaml
deleted file mode 100644
index 217097c..0000000
--- a/charms/trusty/neutron-api-contrail/metadata.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-name: neutron-api-contrail
-summary: OpenStack Neutron API OpenContrail Agent
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- Neutron is a virtual network service for Openstack, and a part of
- Netstack. Just like OpenStack Nova provides an API to dynamically
- request and configure virtual servers, Neutron provides an API to
- dynamically request and configure virtual networks. These networks
- connect "interfaces" from other OpenStack services (e.g., virtual NICs
- from Nova VMs). The Neutron API supports extensions to provide
- advanced network capabilities (e.g., QoS, ACLs, network monitoring,
- etc.)
- .
- This charm provides the OpenStack Neutron API OpenContrail agent, managing
- L2 connectivity on neutron-api services.
-categories:
- - openstack
-subordinate: true
-provides:
- neutron-plugin-api-subordinate:
- interface: neutron-plugin-api-subordinate
- scope: container
-requires:
- container:
- interface: juju-info
- scope: container
- contrail-api:
- interface: contrail-api
- identity-admin:
- interface: keystone-admin
diff --git a/charms/trusty/neutron-api-contrail/templates/ContrailPlugin.ini b/charms/trusty/neutron-api-contrail/templates/ContrailPlugin.ini
deleted file mode 100644
index 59fba1e..0000000
--- a/charms/trusty/neutron-api-contrail/templates/ContrailPlugin.ini
+++ /dev/null
@@ -1,30 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[APISERVER]
-api_server_ip = {{ api_server }}
-api_server_port = {{ api_port }}
-multi_tenancy = True
-
-[KEYSTONE]
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-auth_url = http://{{ auth_host }}:{{ auth_port }}/v2.0
-
-{%- if authtoken %}
-
-[keystone_authtoken]
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = http
-{%- if authtoken_creds %}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-{%- endif %}
-
-{%- endif %}
-
diff --git a/charms/trusty/neutron-contrail/.bzrignore b/charms/trusty/neutron-contrail/.bzrignore
deleted file mode 100644
index ba077a4..0000000
--- a/charms/trusty/neutron-contrail/.bzrignore
+++ /dev/null
@@ -1 +0,0 @@
-bin
diff --git a/charms/trusty/neutron-contrail/Makefile b/charms/trusty/neutron-contrail/Makefile
deleted file mode 100644
index 378713f..0000000
--- a/charms/trusty/neutron-contrail/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/make
-PYTHON := /usr/bin/env python
-
-bin/charm_helpers_sync.py:
- @mkdir -p bin
- @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
- > bin/charm_helpers_sync.py
-
-sync: bin/charm_helpers_sync.py
- @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
diff --git a/charms/trusty/neutron-contrail/README.md b/charms/trusty/neutron-contrail/README.md
deleted file mode 100644
index 6257304..0000000
--- a/charms/trusty/neutron-contrail/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Overview
---------
-
-OpenContrail (www.opencontrail.org) is a fully featured Software Defined
-Networking (SDN) solution for private clouds. It supports high performance
-isolated tenant networks without requiring external hardware support. It
-provides a Neutron plugin to integrate with OpenStack.
-
-This charm is designed to be used in conjunction with the rest of the OpenStack
-related charms in the charm store to virtualize the network that Nova Compute
-instances plug into.
-
-This subordinate charm provides the Nova Compute vRouter component which
-contains the contrail-vrouter-agent service.
-Only OpenStack Icehouse or newer is supported.
-Juju 1.23.2+ required.
-
-Usage
------
-
-Nova Compute, Contrail Configuration and Keystone are prerequisite services to
-deploy.
-
-Nova Compute should be deployed with legacy plugin management set to false:
-
- nova-compute:
- manage-neutron-plugin-legacy-mode: false
-
-Once ready, deploy and relate as follows:
-
- juju deploy neutron-contrail
- juju add-relation nova-compute neutron-contrail
- juju add-relation neutron-contrail:contrail-discovery contrail-configuration:contrail-discovery
- juju add-relation neutron-contrail:contrail-api contrail-configuration:contrail-api
- juju add-relation neutron-contrail keystone
-
-Install Sources
----------------
-
-The version of OpenContrail installed when deploying can be changed using the
-'install-sources' option. This is a multilined value that may refer to PPAs or
-Deb repositories.
-
-Control Node Relation
----------------------
-
-This charm is typically related to contrail-configuration:contrail-discovery.
-This instructs the Contrail vRouter agent to use the discovery service for
-locating control nodes. This is the recommended approach.
-
-Should the user wish to use vRouter configuration that specifies the location
-of control nodes explicitly, not using the discovery service, they can relate
-to a contrail-control charm:
-
- juju add-relation neutron-contrail contrail-control
-
-Nova Metadata
--------------
-
-To use Nova Metadata with Nova Compute instances, a metadata service must first
-be registered. Registration allows OpenContrail to create the appropriate
-network config to proxy requests from instances to a nova-api service on the
-network.
-
-Option 'local-metadata-server' controls if a local nova-api-metadata service is
-started (per Compute Node) and registered to serve metadata requests. It is
-the recommended approach for serving metadata to instances and is enabled by
-default.
-
-Alternatively, relating to a charm implementing neutron-metadata interface will
-use this external metadata service:
-
- juju add-relation neutron-contrail neutron-metadata-charm
-
-contrail-configuration charm also needs to be related to the same charm to
-register the metadata service:
-
- juju add-relation contrail-configuration neutron-metadata-charm
-
-Virtual Gateways
-----------------
-
-For launched instances to be able to access external networks e.g. the Internet
-a gateway is required that allows virtual network traffic to traverse an IP
-network.
-
-For production setups, this is typically a hardware gateway. For testing
-purposes OpenContrail provides a software gateway (Simple Gateway) that runs on
-Compute Node(s) and provides this function.
-
-Option 'virtual-gateways' allows specifying of one or more software gateways.
-The value is a YAML encoded string using a list of maps, where each map
-consists of the following attributes:
-
- project - project name
- network - network name
- interface - interface to use (will be created)
- subnets - list of virtual subnets to route
- routes - list of routes gateway will make available to virtual subnets,
- 0.0.0.0/0 selects all routes
-
-For example to create a gateway for virtual subnet 10.0.10.0/24 on
-'admin:public' network using local interface vgw for routing:
-
- juju set neutron-contrail \
- "virtual-gateways=[ { project: admin, network: public, interface: vgw, subnets: [ 10.0.10.0/24 ], routes: [ 0.0.0.0/0 ] } ]"
-
-Previously specified gateways will be removed.
-
-The routing of external IP networks needs to be updated if virtual network
-traffic will traverse it. Traffic flow from the IP network should be directed to
-one of the Compute Nodes.
-
-For example a static route could be added to the router of the Compute Node
-network:
-
- // assuming it's a linux box
- sudo ip route add 10.0.10.0/24 via <compute ip>
-
-The virtual-gateways option can be used with 'floating-ip-pools' option of the
-contrail-configuration charm to create a typical Neutron setup of launched
-instances attached to a private network, each with an assigned public/external
-floating IP.
-
-Using the running example above, you would use Neutron to create an external
-network with subnet 10.0.10.0/24 and a private network of 10.0.5.0/24. You would
-set the virtual-gateways option (as above) and the floating-ip-pools option.
-You would attach launched instances to the private network and then assign them
-floating IPs from the external network. vRouter will automatically perform 1:1
-NAT of an external address to a private one. (Note: security groups may still
-need to be updated to allow traffic flow).
diff --git a/charms/trusty/neutron-contrail/charm-helpers-sync.yaml b/charms/trusty/neutron-contrail/charm-helpers-sync.yaml
deleted file mode 100644
index 0af5672..0000000
--- a/charms/trusty/neutron-contrail/charm-helpers-sync.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-branch: lp:charm-helpers
-destination: hooks/charmhelpers
-include:
- - core
- - fetch
diff --git a/charms/trusty/neutron-contrail/config.yaml b/charms/trusty/neutron-contrail/config.yaml
deleted file mode 100644
index 8430326..0000000
--- a/charms/trusty/neutron-contrail/config.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-options:
- install-sources:
- type: string
- default: |
- - "ppa:opencontrail/ppa"
- - "ppa:opencontrail/r2.20"
- description: Package sources for install
- install-keys:
- type: string
- description: Apt keys for package install sources
- vhost-interface:
- type: string
- description: |
- Specify the interface to install vhost0 on. If left empty, vhost0 will
- be installed on the default gateway interface.
- vhost-gateway:
- type: string
- default: auto
- description: |
- Specify the gateway for vhost0, either an IPv4 address or keyword 'auto'.
- 'auto' will set gateway automatically based on host's existing routes.
- control-interface:
- type: string
- default: vhost0
- description: |
- Specify the interface to use for the control channel.
- Default is to use vRouter interface that will be created.
- local-metadata-server:
- type: boolean
- default: true
- description: |
- Run a local instance of nova-api-metadata for serving metadata to VMs.
- An external metadata server (neutron-metadata relation) is not required
- when enabled.
- remove-juju-bridge:
- type: boolean
- default: false
- description: |
- Juju on MAAS creates a juju-br0 bridge for deploying LXC and KVM
- workloads. Enable this to remove this bridge if you want to install
- vhost0 directly on the underlying interface.
-
- WARNING: This will break current and future juju-deployed LXC or KVM
- workloads on all machines where this is set to true.
- virtual-gateways:
- type: string
- description: |
- Virtual gateways to create (software based).
-
- Using a YAML encoded string specify one or more gateways using a list
- of maps, where each map consists of the following attributes:
-
- project - project name
- network - network name
- interface - interface to use (will be created)
- subnets - list of virtual subnets to route
- routes - list of routes gateway will make available to virtual subnets,
- 0.0.0.0/0 selects all routes
-
- For example:
-
- // make any network available to virtual subnet 10.0.10.0/24 on
- // admin:public network using local interface vgw to route
- [ { project: admin, network: public, interface: vgw, subnets: [ 10.0.10.0/24 ], routes: [ 0.0.0.0/0 ] } ]
- # development options
- contrail-api-ip:
- type: string
- description: Specify contrail-api ip manually
- contrail-api-port:
- type: int
- description: Specify contrail-api port manually
- discovery-server-ip:
- type: string
- description: Specify discovery server ip manually
diff --git a/charms/trusty/neutron-contrail/copyright b/charms/trusty/neutron-contrail/copyright
deleted file mode 100644
index 4081144..0000000
--- a/charms/trusty/neutron-contrail/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2014, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/neutron-contrail/files/60-vrouter-vgw.conf b/charms/trusty/neutron-contrail/files/60-vrouter-vgw.conf
deleted file mode 100644
index 3e7fe4c..0000000
--- a/charms/trusty/neutron-contrail/files/60-vrouter-vgw.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# sysctl values for vrouter gateway
-
-net.ipv4.ip_forward=1
diff --git a/charms/trusty/neutron-contrail/files/contrail-nodemgr-vrouter.ini b/charms/trusty/neutron-contrail/files/contrail-nodemgr-vrouter.ini
deleted file mode 100644
index 860812d..0000000
--- a/charms/trusty/neutron-contrail/files/contrail-nodemgr-vrouter.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[eventlistener:contrail-vrouter-nodemgr]
-command=/bin/bash -c "exec python /usr/bin/contrail-nodemgr --nodetype=contrail-vrouter"
-events=PROCESS_COMMUNICATION,PROCESS_STATE,TICK_60
-buffer_size=10000
-stdout_logfile=/var/log/contrail/contrail-vrouter-nodemgr-stdout.log
-stderr_logfile=/var/log/contrail/contrail-vrouter-nodemgr-stderr.log
diff --git a/charms/trusty/neutron-contrail/files/contrail-vrouter-nodemgr b/charms/trusty/neutron-contrail/files/contrail-vrouter-nodemgr
deleted file mode 100644
index 1c6a55b..0000000
--- a/charms/trusty/neutron-contrail/files/contrail-vrouter-nodemgr
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-
-# chkconfig: 2345 99 01
-# description: Juniper Network vRouter Node Manager
-
-supervisorctl -s unix:///tmp/supervisord_vrouter.sock ${1} `basename ${0}`
diff --git a/charms/trusty/neutron-contrail/files/contrail-vrouter.rules b/charms/trusty/neutron-contrail/files/contrail-vrouter.rules
deleted file mode 100644
index 54cba11..0000000
--- a/charms/trusty/neutron-contrail/files/contrail-vrouter.rules
+++ /dev/null
@@ -1,3 +0,0 @@
-{ "Rules": [
- ]
-}
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/__init__.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/__init__.py
deleted file mode 100644
index f72e7f8..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/__init__.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/__init__.py
deleted file mode 100644
index d1400a0..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/decorators.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/decorators.py
deleted file mode 100644
index bb05620..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/files.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/files.py
deleted file mode 100644
index 0f12d32..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/files.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/fstab.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/fstab.py
deleted file mode 100644
index 3056fba..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/hookenv.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/hookenv.py
deleted file mode 100644
index ab53a78..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,898 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-import glob
-import os
-import json
-import yaml
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = str((func, args, kwargs))
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def service_name():
- """The name service group this unit belongs to"""
- return local_unit().split('/')[0]
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path):
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- self._prev_dict = json.load(f)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-@cached
-def config(scope=None):
- """Juju charm configuration"""
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- try:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- if scope is not None:
- return config_data
- return Config(config_data)
- except ValueError:
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peer'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peer``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peer'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-@cached
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/host.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/host.py
deleted file mode 100644
index cb3c527..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/host.py
+++ /dev/null
@@ -1,586 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-from contextlib import contextmanager
-from collections import OrderedDict
-
-import six
-
-from .hookenv import log
-from .fstab import Fstab
-
-
-def service_start(service_name):
- """Start a system service"""
- return service('start', service_name)
-
-
-def service_stop(service_name):
- """Stop a system service"""
- return service('stop', service_name)
-
-
-def service_restart(service_name):
- """Restart a system service"""
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if
- reload fails"""
- service_result = service('reload', service_name)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot."""
- stopped = service_stop(service_name)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d"):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service"""
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- # XXX: Support SystemD too
- raise ValueError(
- "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
- service_name, upstart_file, sysv_file))
-
- started = service_start(service_name)
- return started
-
-
-def service(action, service_name):
- """Control a system service"""
- cmd = ['service', service_name, action]
- return subprocess.call(cmd) == 0
-
-
-def service_running(service):
- """Determine whether a system service is running"""
- try:
- output = subprocess.check_output(
- ['service', service, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- if ("start/running" in output or "is running" in output):
- return True
- else:
- return False
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def adduser(username, password=None, shell='/bin/bash', system_user=False):
- """Add a user to the system"""
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def add_group(group_name, system_group=False):
- """Add a group to the system"""
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- except KeyError:
- log('creating group {0}'.format(group_name))
- cmd = ['addgroup']
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def rsync(from_path, to_path, flags='-r', options=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- target.write(content)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab
- """
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file
- """
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """
- Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """
- Generate a hash checksum of all files matching 'path'. Standard wildcards
- like '*' and '?' are supported, see documentation for the 'glob' module for
- more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """
- Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- pass
-
-
-def restart_on_change(restart_map, stopstart=False):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
- """
- def wrap(f):
- def wrapped_f(*args, **kwargs):
- checksums = {path: path_hash(path) for path in restart_map}
- f(*args, **kwargs)
- restarts = []
- for path in restart_map:
- if path_hash(path) != checksums[path]:
- restarts += restart_map[path]
- services_list = list(OrderedDict.fromkeys(restarts))
- if not stopstart:
- for service_name in services_list:
- service('restart', service_name)
- else:
- for action in ['stop', 'start']:
- for service_name in services_list:
- service(action, service_name)
- return wrapped_f
- return wrap
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- '''Return a list of nics of given type(s)'''
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- '''Set MTU on a network interface'''
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- '''Compare supplied revno with the revno of the installed package
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- '''
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
-
-
-@contextmanager
-def chdir(d):
- cur = os.getcwd()
- try:
- yield os.chdir(d)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True):
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- for root, dirs, files in os.walk(path):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- chownr(path, owner, group, follow_links=False)
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/hugepage.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/hugepage.py
deleted file mode 100644
index 4aaca3f..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/kernel.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/kernel.py
deleted file mode 100644
index 5dc6495..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-from subprocess import check_call, check_output
-import re
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- check_call(cmd)
- if persist:
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image"""
- return check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 0928158..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/base.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/base.py
deleted file mode 100644
index a42660c..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- new_ports = service.get('ports', [])
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port):
- old_port = int(old_port)
- if old_port not in new_ports:
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- if event_name == 'start':
- hookenv.open_port(port)
- elif event_name == 'stop':
- hookenv.close_port(port)
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3f67783..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/strutils.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/strutils.py
deleted file mode 100644
index 7e3f969..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/sysctl.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/sysctl.py
deleted file mode 100644
index 21cc8ab..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/templating.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/templating.py
deleted file mode 100644
index 4531999..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8'):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- Note: Using this requires python-jinja2; if it is not installed, calling
- this will attempt to use charmhelpers.fetch.apt_install to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- apt_install('python-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- loader = Environment(loader=FileSystemLoader(templates_dir))
- try:
- source = source
- template = loader.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/unitdata.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/core/unitdata.py
deleted file mode 100644
index 338104e..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-#
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/__init__.py
deleted file mode 100644
index 1cfb99f..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/__init__.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import importlib
-from tempfile import NamedTemporaryFile
-import time
-from yaml import safe_load
-from charmhelpers.core.host import (
- lsb_release
-)
-import subprocess
-from charmhelpers.core.hookenv import (
- config,
- log,
-)
-import os
-
-import six
-if six.PY3:
- from urllib.parse import urlparse, urlunparse
-else:
- from urlparse import urlparse, urlunparse
-
-
-CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
-"""
-PROPOSED_POCKET = """# Proposed
-deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
-"""
-CLOUD_ARCHIVE_POCKETS = {
- # Folsom
- 'folsom': 'precise-updates/folsom',
- 'precise-folsom': 'precise-updates/folsom',
- 'precise-folsom/updates': 'precise-updates/folsom',
- 'precise-updates/folsom': 'precise-updates/folsom',
- 'folsom/proposed': 'precise-proposed/folsom',
- 'precise-folsom/proposed': 'precise-proposed/folsom',
- 'precise-proposed/folsom': 'precise-proposed/folsom',
- # Grizzly
- 'grizzly': 'precise-updates/grizzly',
- 'precise-grizzly': 'precise-updates/grizzly',
- 'precise-grizzly/updates': 'precise-updates/grizzly',
- 'precise-updates/grizzly': 'precise-updates/grizzly',
- 'grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-grizzly/proposed': 'precise-proposed/grizzly',
- 'precise-proposed/grizzly': 'precise-proposed/grizzly',
- # Havana
- 'havana': 'precise-updates/havana',
- 'precise-havana': 'precise-updates/havana',
- 'precise-havana/updates': 'precise-updates/havana',
- 'precise-updates/havana': 'precise-updates/havana',
- 'havana/proposed': 'precise-proposed/havana',
- 'precise-havana/proposed': 'precise-proposed/havana',
- 'precise-proposed/havana': 'precise-proposed/havana',
- # Icehouse
- 'icehouse': 'precise-updates/icehouse',
- 'precise-icehouse': 'precise-updates/icehouse',
- 'precise-icehouse/updates': 'precise-updates/icehouse',
- 'precise-updates/icehouse': 'precise-updates/icehouse',
- 'icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-icehouse/proposed': 'precise-proposed/icehouse',
- 'precise-proposed/icehouse': 'precise-proposed/icehouse',
- # Juno
- 'juno': 'trusty-updates/juno',
- 'trusty-juno': 'trusty-updates/juno',
- 'trusty-juno/updates': 'trusty-updates/juno',
- 'trusty-updates/juno': 'trusty-updates/juno',
- 'juno/proposed': 'trusty-proposed/juno',
- 'trusty-juno/proposed': 'trusty-proposed/juno',
- 'trusty-proposed/juno': 'trusty-proposed/juno',
- # Kilo
- 'kilo': 'trusty-updates/kilo',
- 'trusty-kilo': 'trusty-updates/kilo',
- 'trusty-kilo/updates': 'trusty-updates/kilo',
- 'trusty-updates/kilo': 'trusty-updates/kilo',
- 'kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-kilo/proposed': 'trusty-proposed/kilo',
- 'trusty-proposed/kilo': 'trusty-proposed/kilo',
- # Liberty
- 'liberty': 'trusty-updates/liberty',
- 'trusty-liberty': 'trusty-updates/liberty',
- 'trusty-liberty/updates': 'trusty-updates/liberty',
- 'trusty-updates/liberty': 'trusty-updates/liberty',
- 'liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-liberty/proposed': 'trusty-proposed/liberty',
- 'trusty-proposed/liberty': 'trusty-proposed/liberty',
- # Mitaka
- 'mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka': 'trusty-updates/mitaka',
- 'trusty-mitaka/updates': 'trusty-updates/mitaka',
- 'trusty-updates/mitaka': 'trusty-updates/mitaka',
- 'mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
- 'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
-}
-
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
- 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
-)
-
-APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
-APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
-APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
-
-
-class SourceConfigError(Exception):
- pass
-
-
-class UnhandledSource(Exception):
- pass
-
-
-class AptLockError(Exception):
- pass
-
-
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
-def filter_installed_packages(packages):
- """Returns a list of packages that require installation"""
- cache = apt_cache()
- _pkgs = []
- for package in packages:
- try:
- p = cache[package]
- p.current_ver or _pkgs.append(package)
- except KeyError:
- log('Package {} has no installation candidate.'.format(package),
- level='WARNING')
- _pkgs.append(package)
- return _pkgs
-
-
-def apt_cache(in_memory=True):
- """Build and return an apt cache"""
- from apt import apt_pkg
- apt_pkg.init()
- if in_memory:
- apt_pkg.config.set("Dir::Cache::pkgcache", "")
- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
- return apt_pkg.Cache()
-
-
-def apt_install(packages, options=None, fatal=False):
- """Install one or more packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- cmd.append('install')
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Installing {} with options: {}".format(packages,
- options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_upgrade(options=None, fatal=False, dist=False):
- """Upgrade all packages"""
- if options is None:
- options = ['--option=Dpkg::Options::=--force-confold']
-
- cmd = ['apt-get', '--assume-yes']
- cmd.extend(options)
- if dist:
- cmd.append('dist-upgrade')
- else:
- cmd.append('upgrade')
- log("Upgrading with options: {}".format(options))
- _run_apt_command(cmd, fatal)
-
-
-def apt_update(fatal=False):
- """Update local apt cache"""
- cmd = ['apt-get', 'update']
- _run_apt_command(cmd, fatal)
-
-
-def apt_purge(packages, fatal=False):
- """Purge one or more packages"""
- cmd = ['apt-get', '--assume-yes', 'purge']
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Purging {}".format(packages))
- _run_apt_command(cmd, fatal)
-
-
-def apt_mark(packages, mark, fatal=False):
- """Flag one or more packages using apt-mark"""
- cmd = ['apt-mark', mark]
- if isinstance(packages, six.string_types):
- cmd.append(packages)
- else:
- cmd.extend(packages)
- log("Holding {}".format(packages))
-
- if fatal:
- subprocess.check_call(cmd, universal_newlines=True)
- else:
- subprocess.call(cmd, universal_newlines=True)
-
-
-def apt_hold(packages, fatal=False):
- return apt_mark(packages, 'hold', fatal=fatal)
-
-
-def apt_unhold(packages, fatal=False):
- return apt_mark(packages, 'unhold', fatal=fatal)
-
-
-def add_source(source, key=None):
- """Add a package source to this system.
-
- @param source: a URL or sources.list entry, as supported by
- add-apt-repository(1). Examples::
-
- ppa:charmers/example
- deb https://stub:key@private.example.com/ubuntu trusty main
-
- In addition:
- 'proposed:' may be used to enable the standard 'proposed'
- pocket for the release.
- 'cloud:' may be used to activate official cloud archive pockets,
- such as 'cloud:icehouse'
- 'distro' may be used as a noop
-
- @param key: A key to be added to the system's APT keyring and used
- to verify the signatures on packages. Ideally, this should be an
- ASCII format GPG public key including the block headers. A GPG key
- id may also be used, but be aware that only insecure protocols are
- available to retrieve the actual public key from a public keyserver
- placing your Juju environment at risk. ppa and cloud archive keys
- are securely added automtically, so sould not be provided.
- """
- if source is None:
- log('Source is not present. Skipping')
- return
-
- if (source.startswith('ppa:') or
- source.startswith('http') or
- source.startswith('deb ') or
- source.startswith('cloud-archive:')):
- subprocess.check_call(['add-apt-repository', '--yes', source])
- elif source.startswith('cloud:'):
- apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
- fatal=True)
- pocket = source.split(':')[-1]
- if pocket not in CLOUD_ARCHIVE_POCKETS:
- raise SourceConfigError(
- 'Unsupported cloud: source option %s' %
- pocket)
- actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
- apt.write(CLOUD_ARCHIVE.format(actual_pocket))
- elif source == 'proposed':
- release = lsb_release()['DISTRIB_CODENAME']
- with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
- apt.write(PROPOSED_POCKET.format(release))
- elif source == 'distro':
- pass
- else:
- log("Unknown source: {!r}".format(source))
-
- if key:
- if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
- with NamedTemporaryFile('w+') as key_file:
- key_file.write(key)
- key_file.flush()
- key_file.seek(0)
- subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
- elif 'http://' in key:
- with NamedTemporaryFile('w+') as key_file:
- subprocess.check_call(['wget', key, '-O-'], stdout=key_file)
- subprocess.check_call(['apt-key', 'add', key_file.name])
- else:
- # Note that hkp: is in no way a secure protocol. Using a
- # GPG key id is pointless from a security POV unless you
- # absolutely trust your network and DNS.
- subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv',
- key])
-
-
-def configure_sources(update=False,
- sources_var='install_sources',
- keys_var='install_keys'):
- """
- Configure multiple sources from charm configuration.
-
- The lists are encoded as yaml fragments in the configuration.
- The frament needs to be included as a string. Sources and their
- corresponding keys are of the types supported by add_source().
-
- Example config:
- install_sources: |
- - "ppa:foo"
- - "http://example.com/repo precise main"
- install_keys: |
- - null
- - "a1b2c3d4"
-
- Note that 'null' (a.k.a. None) should not be quoted.
- """
- sources = safe_load((config(sources_var) or '').strip()) or []
- keys = safe_load((config(keys_var) or '').strip()) or None
-
- if isinstance(sources, six.string_types):
- sources = [sources]
-
- if keys is None:
- for source in sources:
- add_source(source, None)
- else:
- if isinstance(keys, six.string_types):
- keys = [keys]
-
- if len(sources) != len(keys):
- raise SourceConfigError(
- 'Install sources and keys lists are different lengths')
- for source, key in zip(sources, keys):
- add_source(source, key)
- if update:
- apt_update(fatal=True)
-
-
-def install_remote(source, *args, **kwargs):
- """
- Install a file tree from a remote source
-
- The specified source should be a url of the form:
- scheme://[host]/path[#[option=value][&...]]
-
- Schemes supported are based on this modules submodules.
- Options supported are submodule-specific.
- Additional arguments are passed through to the submodule.
-
- For example::
-
- dest = install_remote('http://example.com/archive.tgz',
- checksum='deadbeef',
- hash_type='sha1')
-
- This will download `archive.tgz`, validate it using SHA1 and, if
- the file is ok, extract it and return the directory in which it
- was extracted. If the checksum fails, it will raise
- :class:`charmhelpers.core.host.ChecksumError`.
- """
- # We ONLY check for True here because can_handle may return a string
- # explaining why it can't handle a given source.
- handlers = [h for h in plugins() if h.can_handle(source) is True]
- installed_to = None
- for handler in handlers:
- try:
- installed_to = handler.install(source, *args, **kwargs)
- except UnhandledSource as e:
- log('Install source attempt unsuccessful: {}'.format(e),
- level='WARNING')
- if not installed_to:
- raise UnhandledSource("No handler found for source {}".format(source))
- return installed_to
-
-
-def install_from_config(config_var_name):
- charm_config = config()
- source = charm_config[config_var_name]
- return install_remote(source)
-
-
-def plugins(fetch_handlers=None):
- if not fetch_handlers:
- fetch_handlers = FETCH_HANDLERS
- plugin_list = []
- for handler_name in fetch_handlers:
- package, classname = handler_name.rsplit('.', 1)
- try:
- handler_class = getattr(
- importlib.import_module(package),
- classname)
- plugin_list.append(handler_class())
- except (ImportError, AttributeError):
- # Skip missing plugins so that they can be ommitted from
- # installation if desired
- log("FetchHandler {} not found, skipping plugin".format(
- handler_name))
- return plugin_list
-
-
-def _run_apt_command(cmd, fatal=False):
- """
- Run an APT command, checking output and retrying if the fatal flag is set
- to True.
-
- :param: cmd: str: The apt command to run.
- :param: fatal: bool: Whether the command's output should be checked and
- retried.
- """
- env = os.environ.copy()
-
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- retry_count = 0
- result = None
-
- # If the command is considered "fatal", we need to retry if the apt
- # lock was not acquired.
-
- while result is None or result == APT_NO_LOCK:
- try:
- result = subprocess.check_call(cmd, env=env)
- except subprocess.CalledProcessError as e:
- retry_count = retry_count + 1
- if retry_count > APT_NO_LOCK_RETRY_COUNT:
- raise
- result = e.returncode
- log("Couldn't acquire DPKG lock. Will retry in {} seconds."
- "".format(APT_NO_LOCK_RETRY_DELAY))
- time.sleep(APT_NO_LOCK_RETRY_DELAY)
-
- else:
- subprocess.call(cmd, env=env)
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/archiveurl.py
deleted file mode 100644
index efd7f9f..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/archiveurl.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import hashlib
-import re
-
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.payload.archive import (
- get_archive_handler,
- extract,
-)
-from charmhelpers.core.host import mkdir, check_hash
-
-import six
-if six.PY3:
- from urllib.request import (
- build_opener, install_opener, urlopen, urlretrieve,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- )
- from urllib.parse import urlparse, urlunparse, parse_qs
- from urllib.error import URLError
-else:
- from urllib import urlretrieve
- from urllib2 import (
- build_opener, install_opener, urlopen,
- HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
- URLError
- )
- from urlparse import urlparse, urlunparse, parse_qs
-
-
-def splituser(host):
- '''urllib.splituser(), but six's support of this seems broken'''
- _userprog = re.compile('^(.*)@(.*)$')
- match = _userprog.match(host)
- if match:
- return match.group(1, 2)
- return None, host
-
-
-def splitpasswd(user):
- '''urllib.splitpasswd(), but six's support of this is missing'''
- _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
- match = _passwdprog.match(user)
- if match:
- return match.group(1, 2)
- return user, None
-
-
-class ArchiveUrlFetchHandler(BaseFetchHandler):
- """
- Handler to download archive files from arbitrary URLs.
-
- Can fetch from http, https, ftp, and file URLs.
-
- Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
-
- Installs the contents of the archive in $CHARM_DIR/fetched/.
- """
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
- # XXX: Why is this returning a boolean and a string? It's
- # doomed to fail since "bool(can_handle('foo://'))" will be True.
- return "Wrong source type"
- if get_archive_handler(self.base_url(source)):
- return True
- return False
-
- def download(self, source, dest):
- """
- Download an archive file.
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local path location to download archive file to.
- """
- # propogate all exceptions
- # URLError, OSError, etc
- proto, netloc, path, params, query, fragment = urlparse(source)
- if proto in ('http', 'https'):
- auth, barehost = splituser(netloc)
- if auth is not None:
- source = urlunparse((proto, barehost, path, params, query, fragment))
- username, password = splitpasswd(auth)
- passman = HTTPPasswordMgrWithDefaultRealm()
- # Realm is set to None in add_password to force the username and password
- # to be used whatever the realm
- passman.add_password(None, source, username, password)
- authhandler = HTTPBasicAuthHandler(passman)
- opener = build_opener(authhandler)
- install_opener(opener)
- response = urlopen(source)
- try:
- with open(dest, 'w') as dest_file:
- dest_file.write(response.read())
- except Exception as e:
- if os.path.isfile(dest):
- os.unlink(dest)
- raise e
-
- # Mandatory file validation via Sha1 or MD5 hashing.
- def download_and_validate(self, url, hashsum, validate="sha1"):
- tempfile, headers = urlretrieve(url)
- check_hash(tempfile, hashsum, validate)
- return tempfile
-
- def install(self, source, dest=None, checksum=None, hash_type='sha1'):
- """
- Download and install an archive file, with optional checksum validation.
-
- The checksum can also be given on the `source` URL's fragment.
- For example::
-
- handler.install('http://example.com/file.tgz#sha1=deadbeef')
-
- :param str source: URL pointing to an archive file.
- :param str dest: Local destination path to install to. If not given,
- installs to `$CHARM_DIR/archives/archive_file_name`.
- :param str checksum: If given, validate the archive file after download.
- :param str hash_type: Algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
-
- """
- url_parts = self.parse_url(source)
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
- try:
- self.download(source, dld_file)
- except URLError as e:
- raise UnhandledSource(e.reason)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- options = parse_qs(url_parts.fragment)
- for key, value in options.items():
- if not six.PY3:
- algorithms = hashlib.algorithms
- else:
- algorithms = hashlib.algorithms_available
- if key in algorithms:
- if len(value) != 1:
- raise TypeError(
- "Expected 1 hash value, not %d" % len(value))
- expected = value[0]
- check_hash(dld_file, expected, key)
- if checksum:
- check_hash(dld_file, checksum, hash_type)
- return extract(dld_file, dest)
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/bzrurl.py
deleted file mode 100644
index 3531315..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/bzrurl.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('bzrlib does not support Python3')
-
-try:
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-bzrlib")
- from bzrlib.branch import Branch
- from bzrlib import bzrdir, workingtree, errors
-
-
-class BzrUrlFetchHandler(BaseFetchHandler):
- """Handler for bazaar branches via generic and lp URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- if url_parts.scheme not in ('bzr+ssh', 'lp'):
- return False
- else:
- return True
-
- def branch(self, source, dest):
- url_parts = self.parse_url(source)
- # If we use lp:branchname scheme we need to load plugins
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
- if url_parts.scheme == "lp":
- from bzrlib.plugin import load_plugins
- load_plugins()
- try:
- local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
- except errors.AlreadyControlDirError:
- local_branch = Branch.open(dest)
- try:
- remote_branch = Branch.open(source)
- remote_branch.push(local_branch)
- tree = workingtree.WorkingTree.open(dest)
- tree.update()
- except Exception as e:
- raise e
-
- def install(self, source):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.branch(source, dest_dir)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/giturl.py
deleted file mode 100644
index f023b26..0000000
--- a/charms/trusty/neutron-contrail/hooks/charmhelpers/fetch/giturl.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# This file is part of charm-helpers.
-#
-# charm-helpers is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License version 3 as
-# published by the Free Software Foundation.
-#
-# charm-helpers is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from charmhelpers.fetch import (
- BaseFetchHandler,
- UnhandledSource
-)
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
- raise ImportError('GitPython does not support Python 3')
-
-try:
- from git import Repo
-except ImportError:
- from charmhelpers.fetch import apt_install
- apt_install("python-git")
- from git import Repo
-
-from git.exc import GitCommandError # noqa E402
-
-
-class GitUrlFetchHandler(BaseFetchHandler):
- """Handler for git branches via generic and github URLs"""
- def can_handle(self, source):
- url_parts = self.parse_url(source)
- # TODO (mattyw) no support for ssh git@ yet
- if url_parts.scheme not in ('http', 'https', 'git'):
- return False
- else:
- return True
-
- def clone(self, source, dest, branch, depth=None):
- if not self.can_handle(source):
- raise UnhandledSource("Cannot handle {}".format(source))
-
- if depth:
- Repo.clone_from(source, dest, branch=branch, depth=depth)
- else:
- Repo.clone_from(source, dest, branch=branch)
-
- def install(self, source, branch="master", dest=None, depth=None):
- url_parts = self.parse_url(source)
- branch_name = url_parts.path.strip("/").split("/")[-1]
- if dest:
- dest_dir = os.path.join(dest, branch_name)
- else:
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
- branch_name)
- if not os.path.exists(dest_dir):
- mkdir(dest_dir, perms=0o755)
- try:
- self.clone(source, dest_dir, branch, depth)
- except GitCommandError as e:
- raise UnhandledSource(e)
- except OSError as e:
- raise UnhandledSource(e.strerror)
- return dest_dir
diff --git a/charms/trusty/neutron-contrail/hooks/config-changed b/charms/trusty/neutron-contrail/hooks/config-changed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/config-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/contrail-api-relation-broken b/charms/trusty/neutron-contrail/hooks/contrail-api-relation-broken
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/contrail-api-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/contrail-api-relation-changed b/charms/trusty/neutron-contrail/hooks/contrail-api-relation-changed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/contrail-api-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/contrail-api-relation-departed b/charms/trusty/neutron-contrail/hooks/contrail-api-relation-departed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/contrail-api-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-broken b/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-broken
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-changed b/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-changed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-departed b/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-departed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/contrail-discovery-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/control-node-relation-broken b/charms/trusty/neutron-contrail/hooks/control-node-relation-broken
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/control-node-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/control-node-relation-departed b/charms/trusty/neutron-contrail/hooks/control-node-relation-departed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/control-node-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/control-node-relation-joined b/charms/trusty/neutron-contrail/hooks/control-node-relation-joined
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/control-node-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/identity-admin-relation-broken b/charms/trusty/neutron-contrail/hooks/identity-admin-relation-broken
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/identity-admin-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/identity-admin-relation-changed b/charms/trusty/neutron-contrail/hooks/identity-admin-relation-changed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/identity-admin-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/identity-admin-relation-departed b/charms/trusty/neutron-contrail/hooks/identity-admin-relation-departed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/identity-admin-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/install b/charms/trusty/neutron-contrail/hooks/install
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-broken b/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-broken
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-broken
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-changed b/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-changed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-departed b/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-departed
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/neutron-metadata-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/neutron-plugin-relation-joined b/charms/trusty/neutron-contrail/hooks/neutron-plugin-relation-joined
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/neutron-plugin-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/neutron_contrail_hooks.py b/charms/trusty/neutron-contrail/hooks/neutron_contrail_hooks.py
deleted file mode 100755
index a0b013c..0000000
--- a/charms/trusty/neutron-contrail/hooks/neutron_contrail_hooks.py
+++ /dev/null
@@ -1,353 +0,0 @@
-#!/usr/bin/env python
-
-from subprocess import CalledProcessError
-import sys
-
-from apt_pkg import version_compare
-import json
-import uuid
-import yaml
-
-from charmhelpers.core.hookenv import (
- Hooks,
- UnregisteredHookError,
- config,
- is_leader,
- leader_get,
- leader_set,
- log,
- relation_get,
- relation_ids,
- relation_set
-)
-
-from charmhelpers.core.host import (
- restart_on_change,
- service_restart
-)
-
-from charmhelpers.fetch import (
- apt_install,
- apt_upgrade,
- configure_sources
-)
-
-from neutron_contrail_utils import (
- CONTRAIL_VERSION,
- OPENSTACK_VERSION,
- configure_vrouter,
- disable_vrouter_vgw,
- dpkg_version,
- drop_caches,
- enable_vrouter_vgw,
- fix_nodemgr,
- fix_permissions,
- fix_vrouter_scripts,
- ifdown,
- ifup,
- modprobe,
- provision_local_metadata,
- provision_vrouter,
- remove_juju_bridge,
- units,
- unprovision_local_metadata,
- unprovision_vrouter,
- write_barbican_auth_config,
- write_nodemgr_config,
- write_vnc_api_config,
- write_vrouter_config,
- write_vrouter_vgw_interfaces
-)
-
-PACKAGES = [ "contrail-vrouter-dkms", "contrail-vrouter-agent",
- "contrail-nova-driver", "contrail-utils", "python-jinja2",
- "python-netifaces", "python-netaddr", "contrail-nodemgr" ]
-
-PACKAGES_LBAAS = [ "python-barbicanclient", "haproxy" ]
-
-hooks = Hooks()
-config = config()
-
-def check_local_metadata():
- if not is_leader():
- return
-
- if not config.get("vrouter-provisioned"):
- if leader_get("local-metadata-provisioned"):
- # impossible to know if current hook is firing because
- # relation or leader is being removed lp #1469731
- if not relation_ids("cluster"):
- unprovision_local_metadata()
- leader_set({"local-metadata-provisioned": ""})
- return
-
- if config["local-metadata-server"]:
- if not leader_get("local-metadata-provisioned"):
- provision_local_metadata()
- leader_set({"local-metadata-provisioned": True})
- elif leader_get("local-metadata-provisioned"):
- unprovision_local_metadata()
- leader_set({"local-metadata-provisioned": ""})
-
-def check_vrouter():
- # check relation dependencies
- if config_get("contrail-api-ready") \
- and config_get("control-node-ready") \
- and config_get("identity-admin-ready"):
- if not config_get("vrouter-provisioned"):
- provision_vrouter()
- config["vrouter-provisioned"] = True
- elif config_get("vrouter-provisioned"):
- unprovision_vrouter()
- config["vrouter-provisioned"] = False
-
-@hooks.hook("config-changed")
-def config_changed():
- configure_local_metadata()
- configure_virtual_gateways()
- write_config()
- if not units("contrail-discovery") and not units("control-node"):
- config["control-node-ready"] = True if config.get("discovery-server-ip") \
- else False
- if not units("contrail-api"):
- config["contrail-api-ready"] = True if config.get("contrail-api-ip") \
- else False
- check_vrouter()
- check_local_metadata()
-
-def config_get(key):
- try:
- return config[key]
- except KeyError:
- return None
-
-def configure_local_metadata():
- if config["local-metadata-server"]:
- if "local-metadata-secret" not in config:
- # generate secret
- secret = str(uuid.uuid4())
- config["local-metadata-secret"] = secret
- settings = { "metadata-shared-secret": secret }
- # inform relations
- for rid in relation_ids("neutron-plugin"):
- relation_set(relation_id=rid, relation_settings=settings)
- else:
- if "local-metadata-secret" in config:
- # remove secret
- del config["local-metadata-secret"]
- settings = { "metadata-shared-secret": None }
- # inform relations
- for rid in relation_ids("neutron-plugin"):
- relation_set(relation_id=rid, relation_settings=settings)
-
-def configure_virtual_gateways():
- gateways = config.get("virtual-gateways")
- previous_gateways = config_get("virtual-gateways-prev")
- if gateways != previous_gateways:
- # create/destroy virtual gateway interfaces according to new value
- interfaces = { gateway["interface"]: set(gateway["subnets"])
- for gateway in yaml.safe_load(gateways) } \
- if gateways else {}
- previous_interfaces = { gateway["interface"]: set(gateway["subnets"])
- for gateway in yaml.safe_load(previous_gateways) } \
- if previous_gateways else {}
- ifaces = [ interface for interface, subnets in previous_interfaces.iteritems()
- if interface not in interfaces
- or subnets != interfaces[interface] ]
- if ifaces:
- ifdown(ifaces)
-
- write_vrouter_vgw_interfaces()
-
- ifaces = [ interface for interface, subnets in interfaces.iteritems()
- if interface not in previous_interfaces
- or subnets != previous_interfaces[interface] ]
- if ifaces:
- ifup(ifaces)
-
- if interfaces:
- enable_vrouter_vgw()
- else:
- disable_vrouter_vgw()
-
- config["virtual-gateways-prev"] = gateways
-
-@hooks.hook("contrail-api-relation-departed")
-@hooks.hook("contrail-api-relation-broken")
-def contrail_api_departed():
- if not units("contrail-api") and not config.get("contrail-api-ip"):
- config["contrail-api-ready"] = False
- check_vrouter()
- check_local_metadata()
- write_vnc_api_config()
-
-@hooks.hook("contrail-api-relation-changed")
-def contrail_api_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- write_vnc_api_config()
- config["contrail-api-ready"] = True
- check_vrouter()
- check_local_metadata()
-
-@hooks.hook("contrail-discovery-relation-changed")
-def contrail_discovery_changed():
- if not relation_get("port"):
- log("Relation not ready")
- return
- contrail_discovery_relation()
- config["control-node-ready"] = True
- check_vrouter()
- check_local_metadata()
-
-@hooks.hook("contrail-discovery-relation-departed")
-@hooks.hook("contrail-discovery-relation-broken")
-def contrail_discovery_departed():
- if not units("contrail-discovery") \
- and not units("control-node") \
- and not config.get("discovery-server-ip"):
- config["control-node-ready"] = False
- check_vrouter()
- check_local_metadata()
- contrail_discovery_relation()
-
-@restart_on_change({"/etc/contrail/contrail-vrouter-agent.conf": ["contrail-vrouter-agent"],
- "/etc/contrail/contrail-vrouter-nodemgr.conf": ["contrail-vrouter-nodemgr"]})
-def contrail_discovery_relation():
- write_vrouter_config()
- write_nodemgr_config()
-
-@hooks.hook("control-node-relation-departed")
-@hooks.hook("control-node-relation-broken")
-def control_node_departed():
- if not units("control-node") \
- and not units("contrail-discovery") \
- and not config.get("discovery-server-ip"):
- config["control-node-ready"] = False
- check_vrouter()
- check_local_metadata()
- control_node_relation()
-
-@hooks.hook("control-node-relation-joined")
-def control_node_joined():
- control_node_relation()
- config["control-node-ready"] = True
- check_vrouter()
- check_local_metadata()
-
-@restart_on_change({"/etc/contrail/contrail-vrouter-agent.conf": ["contrail-vrouter-agent"]})
-def control_node_relation():
- write_vrouter_config()
-
-@hooks.hook("identity-admin-relation-changed")
-def identity_admin_changed():
- if not relation_get("service_hostname"):
- log("Relation not ready")
- return
- write_vnc_api_config()
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- write_barbican_auth_config()
- config["identity-admin-ready"] = True
- check_vrouter()
- check_local_metadata()
-
-@hooks.hook("identity-admin-relation-departed")
-@hooks.hook("identity-admin-relation-broken")
-def identity_admin_departed():
- if not units("identity-admin"):
- config["identity-admin-ready"] = False
- check_vrouter()
- check_local_metadata()
- write_vnc_api_config()
- if version_compare(CONTRAIL_VERSION, "3.0.2.0-34") >= 0:
- write_barbican_auth_config()
-
-@hooks.hook()
-def install():
- configure_sources(True, "install-sources", "install-keys")
- apt_upgrade(fatal=True, dist=True)
- fix_vrouter_scripts() # bug in 2.0+20141015.1 packages
- apt_install(PACKAGES, fatal=True)
-
- contrail_version = dpkg_version("contrail-vrouter-agent")
- openstack_version = dpkg_version("nova-compute")
- if version_compare(contrail_version, "3.0.2.0-34") >= 0 \
- and version_compare(openstack_version, "2:12.0.0") >= 0:
- # install lbaas packages
- apt_install(PACKAGES_LBAAS, fatal=True)
-
- fix_permissions()
- fix_nodemgr()
- if config.get("remove-juju-bridge"):
- remove_juju_bridge()
- try:
- modprobe("vrouter")
- except CalledProcessError:
- log("vrouter kernel module failed to load, clearing pagecache and retrying")
- drop_caches()
- modprobe("vrouter")
- modprobe("vrouter", True, True)
- configure_vrouter()
- service_restart("nova-compute")
-
-@hooks.hook("neutron-metadata-relation-changed")
-def neutron_metadata_changed():
- if not relation_get("shared-secret"):
- log("Relation not ready")
- return
- neutron_metadata_relation()
-
-@hooks.hook("neutron-metadata-relation-departed")
-@hooks.hook("neutron-metadata-relation-broken")
-@restart_on_change({"/etc/contrail/contrail-vrouter-agent.conf": ["contrail-vrouter-agent"]})
-def neutron_metadata_relation():
- write_vrouter_config()
-
-@hooks.hook("neutron-plugin-relation-joined")
-def neutron_plugin_joined():
- # create plugin config
- section = []
- if version_compare(OPENSTACK_VERSION, "1:2015.1~") < 0:
- if version_compare(OPENSTACK_VERSION, "1:2014.2") >= 0:
- section.append(("network_api_class", "nova_contrail_vif.contrailvif.ContrailNetworkAPI"))
- else:
- section.append(("libvirt_vif_driver", "nova_contrail_vif.contrailvif.VRouterVIFDriver"))
- section.append(("firewall_driver", "nova.virt.firewall.NoopFirewallDriver"))
- conf = {
- "nova-compute": {
- "/etc/nova/nova.conf": {
- "sections": {
- "DEFAULT": section
- }
- }
- }
- }
- relation_set(subordinate_configuration=json.dumps(conf))
-
- if config["local-metadata-server"]:
- settings = { "metadata-shared-secret": config["local-metadata-secret"] }
- relation_set(relation_settings=settings)
-
-def main():
- try:
- hooks.execute(sys.argv)
- except UnregisteredHookError as e:
- log("Unknown hook {} - skipping.".format(e))
-
-@hooks.hook("upgrade-charm")
-def upgrade_charm():
- write_vrouter_config()
- write_vnc_api_config()
- write_nodemgr_config()
- service_restart("supervisor-vrouter")
-
-@restart_on_change({"/etc/contrail/contrail-vrouter-agent.conf": ["contrail-vrouter-agent"],
- "/etc/contrail/contrail-vrouter-nodemgr.conf": ["contrail-vrouter-nodemgr"]})
-def write_config():
- write_vrouter_config()
- write_vnc_api_config()
- write_nodemgr_config()
-
-if __name__ == "__main__":
- main()
diff --git a/charms/trusty/neutron-contrail/hooks/neutron_contrail_utils.py b/charms/trusty/neutron-contrail/hooks/neutron_contrail_utils.py
deleted file mode 100644
index 4bb8002..0000000
--- a/charms/trusty/neutron-contrail/hooks/neutron_contrail_utils.py
+++ /dev/null
@@ -1,478 +0,0 @@
-import functools
-import os
-import pwd
-import shutil
-from socket import gethostbyname, gethostname
-from subprocess import (
- CalledProcessError,
- check_call,
- check_output
-)
-from time import sleep, time
-
-import apt_pkg
-import yaml
-
-import netaddr
-import netifaces
-
-from charmhelpers.core.hookenv import (
- config,
- log,
- related_units,
- relation_get,
- relation_ids,
- relation_type,
- remote_unit
-)
-
-from charmhelpers.core.host import service_restart, service_start
-
-from charmhelpers.core.templating import render
-
-apt_pkg.init()
-
-def dpkg_version(pkg):
- try:
- return check_output(["dpkg-query", "-f", "${Version}\\n", "-W", pkg]).rstrip()
- except CalledProcessError:
- return None
-
-CONTRAIL_VERSION = dpkg_version("contrail-vrouter-agent")
-OPENSTACK_VERSION = dpkg_version("nova-compute")
-
-config = config()
-
-def retry(f=None, timeout=10, delay=2):
- """Retry decorator.
-
- Provides a decorator that can be used to retry a function if it raises
- an exception.
-
- :param timeout: timeout in seconds (default 10)
- :param delay: retry delay in seconds (default 2)
-
- Examples::
-
- # retry fetch_url function
- @retry
- def fetch_url():
- # fetch url
-
- # retry fetch_url function for 60 secs
- @retry(timeout=60)
- def fetch_url():
- # fetch url
- """
- if not f:
- return functools.partial(retry, timeout=timeout, delay=delay)
- @functools.wraps(f)
- def func(*args, **kwargs):
- start = time()
- error = None
- while True:
- try:
- return f(*args, **kwargs)
- except Exception as e:
- error = e
- elapsed = time() - start
- if elapsed >= timeout:
- raise error
- remaining = timeout - elapsed
- if delay <= remaining:
- sleep(delay)
- else:
- sleep(remaining)
- raise error
- return func
-
-def configure_vrouter():
- # run external script to configure vrouter
- args = ["./create-vrouter.sh"]
- iface = config.get("vhost-interface")
- if iface:
- args.append(iface)
- check_call(args, cwd="scripts")
-
-def contrail_api_ctx():
- ip = config.get("contrail-api-ip")
- if ip:
- port = config.get("contrail-api-port")
- return { "api_server": ip,
- "api_port": port if port is not None else 8082 }
-
- ctxs = [ { "api_server": gethostbyname(relation_get("private-address", unit, rid)),
- "api_port": port }
- for rid in relation_ids("contrail-api")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-def contrail_discovery_ctx():
- ip = config.get("discovery-server-ip")
- if ip:
- return { "discovery_server": ip,
- "discovery_port": 5998 }
-
- ctxs = [ { "discovery_server": vip if vip \
- else gethostbyname(relation_get("private-address", unit, rid)),
- "discovery_port": port }
- for rid in relation_ids("contrail-discovery")
- for unit, port, vip in
- ((unit, relation_get("port", unit, rid), relation_get("vip", unit, rid))
- for unit in related_units(rid))
- if port ]
- return ctxs[0] if ctxs else {}
-
-@retry(timeout=300)
-def contrail_provision_linklocal(api_ip, api_port, service_name, service_ip,
- service_port, fabric_ip, fabric_port, op,
- user, password):
- check_call(["contrail-provision-linklocal",
- "--api_server_ip", api_ip,
- "--api_server_port", str(api_port),
- "--linklocal_service_name", service_name,
- "--linklocal_service_ip", service_ip,
- "--linklocal_service_port", str(service_port),
- "--ipfabric_service_ip", fabric_ip,
- "--ipfabric_service_port", str(fabric_port),
- "--oper", op,
- "--admin_user", user,
- "--admin_password", password])
-
-@retry(timeout=300)
-def contrail_provision_vrouter(hostname, ip, api_ip, api_port, op,
- user, password, tenant):
- check_call(["contrail-provision-vrouter",
- "--host_name", hostname,
- "--host_ip", ip,
- "--api_server_ip", api_ip,
- "--api_server_port", str(api_port),
- "--oper", op,
- "--admin_user", user,
- "--admin_password", password,
- "--admin_tenant_name", tenant])
-
-def control_node_ctx():
- return { "control_nodes": [ gethostbyname(relation_get("private-address", unit, rid))
- for rid in relation_ids("control-node")
- for unit in related_units(rid) ] }
-
-def disable_vrouter_vgw():
- if os.path.exists("/etc/sysctl.d/60-vrouter-vgw.conf"):
- # unset sysctl options
- os.remove("/etc/sysctl.d/60-vrouter-vgw.conf")
- check_call(["sysctl", "-qw", "net.ipv4.ip_forward=0"])
-
-def drop_caches():
- """Clears OS pagecache"""
- log("Clearing pagecache")
- check_call(["sync"])
- with open("/proc/sys/vm/drop_caches", "w") as f:
- f.write("3\n")
-
-def enable_vrouter_vgw():
- if not os.path.exists("/etc/sysctl.d/60-vrouter-vgw.conf"):
- # set sysctl options
- shutil.copy("files/60-vrouter-vgw.conf", "/etc/sysctl.d")
- service_start("procps")
-
-def fix_nodemgr():
- # add files missing from contrail-nodemgr package
- shutil.copy("files/contrail-nodemgr-vrouter.ini",
- "/etc/contrail/supervisord_vrouter_files")
- pw = pwd.getpwnam("contrail")
- os.chown("/etc/contrail/supervisord_vrouter_files/contrail-nodemgr-vrouter.ini",
- pw.pw_uid, pw.pw_gid)
- shutil.copy("files/contrail-vrouter.rules",
- "/etc/contrail/supervisord_vrouter_files")
- os.chown("/etc/contrail/supervisord_vrouter_files/contrail-vrouter.rules",
- pw.pw_uid, pw.pw_gid)
- shutil.copy("files/contrail-vrouter-nodemgr", "/etc/init.d")
- os.chmod("/etc/init.d/contrail-vrouter-nodemgr", 0755)
- service_restart("supervisor-vrouter")
-
-def fix_permissions():
- os.chmod("/etc/contrail", 0755)
- os.chown("/etc/contrail", 0, 0)
-
-def fix_vrouter_scripts():
- # certain files need to be present for packages
- if not os.path.exists("/opt/contrail/bin"):
- os.makedirs("/opt/contrail/bin")
- os.symlink("/bin/true", "/opt/contrail/bin/vrouter-pre-start.sh")
- os.symlink("/bin/true", "/opt/contrail/bin/vrouter-post-start.sh")
- os.symlink("/bin/true", "/opt/contrail/bin/vrouter-pre-stop.sh")
-
-def identity_admin_ctx():
- ctxs = [ { "auth_host": gethostbyname(hostname),
- "auth_port": relation_get("service_port", unit, rid),
- "admin_user": relation_get("service_username", unit, rid),
- "admin_password": relation_get("service_password", unit, rid),
- "admin_tenant_name": relation_get("service_tenant_name", unit, rid),
- "auth_region": relation_get("service_region", unit, rid) }
- for rid in relation_ids("identity-admin")
- for unit, hostname in
- ((unit, relation_get("service_hostname", unit, rid)) for unit in related_units(rid))
- if hostname ]
- return ctxs[0] if ctxs else {}
-
-def ifdown(interfaces=None):
- """ifdown an interface or all interfaces"""
- log("Taking down {}".format(interfaces if interfaces else "interfaces"))
- check_call(["ifdown"] + interfaces if interfaces else ["-a"])
-
-def ifup(interfaces=None):
- """ifup an interface or all interfaces"""
- log("Bringing up {}".format(interfaces if interfaces else "interfaces"))
- check_call(["ifup"] + interfaces if interfaces else ["-a"])
-
-def lsmod(module):
- """Check if a kernel module is loaded"""
- with open("/proc/modules", "r") as modules:
- for line in modules:
- if line.split()[0] == module:
- return True
- return False
-
-def modprobe(module, auto_load=False, dkms_autoinstall=False):
- """Load a kernel module.
-
- Allows loading of a kernel module.
-
- 'dkms_autoinstall' is useful for DKMS kernel modules. Juju often upgrades
- units to newer kernels before charm install, which won't be used until the
- machine is rebooted. In these cases, some modules may not be compiled for
- the newer kernel. Setting this argument to True will ensure these modules
- are compiled for newer kernels.
-
- :param module: module to load
- :param auto_load: load module on boot (default False)
- :param dkms_autoinstall: invoke DKMS autoinstall for other kernels
- (default False)
- """
- if not lsmod(module):
- log("Loading kernel module {}".format(module))
- check_call(["modprobe", module])
- if auto_load:
- with open("/etc/modules", "a") as modules:
- modules.write(module)
- modules.write("\n")
- if dkms_autoinstall:
- current = check_output(["uname", "-r"]).rstrip()
- for kernel in os.listdir("/lib/modules"):
- if kernel == current:
- continue
- log("DKMS auto installing for kernel {}".format(kernel))
- check_call(["dkms", "autoinstall", "-k", kernel])
-
-def network_ctx():
- iface = config.get("control-interface")
- return { "control_network_ip": netifaces.ifaddresses(iface)[netifaces.AF_INET][0]["addr"] }
-
-def neutron_metadata_ctx():
- if "local-metadata-secret" in config:
- return { "metadata_secret": config["local-metadata-secret"] }
-
- ctxs = [ { "metadata_secret": relation_get("shared-secret", unit, rid) }
- for rid in relation_ids("neutron-metadata")
- for unit in related_units(rid) ]
- return ctxs[0] if ctxs else {}
-
-def provision_local_metadata():
- api_port = None
- api_ip = config.get("contrail-api-ip")
- if api_ip:
- api_port = config.get("contrail-api-port")
- if api_port is None:
- api_port = 8082
- else:
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- port)
- for rid in relation_ids("contrail-api")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ][0]
- user, password = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid)
- if relation_get("service_hostname", unit, rid) ][0]
- log("Provisioning local metadata service 127.0.0.1:8775")
- contrail_provision_linklocal(api_ip, api_port, "metadata",
- "169.254.169.254", 80, "127.0.0.1", 8775,
- "add", user, password)
-
-def provision_vrouter():
- hostname = gethostname()
- ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
- api_port = None
- api_ip = config.get("contrail-api-ip")
- if api_ip:
- api_port = config.get("contrail-api-port")
- if api_port is None:
- api_port = 8082
- else:
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- port)
- for rid in relation_ids("contrail-api")
- for unit, port in
- ((unit, relation_get("port", unit, rid)) for unit in related_units(rid))
- if port ][0]
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid)
- if relation_get("service_hostname", unit, rid) ][0]
- log("Provisioning vrouter {}".format(ip))
- contrail_provision_vrouter(hostname, ip, api_ip, api_port, "add",
- user, password, tenant)
-
-def remove_juju_bridge():
- # run external script to remove bridge
- check_call(["./remove-juju-bridge.sh"], cwd="scripts")
-
-def units(relation):
- """Return a list of units for the specified relation"""
- return [ unit for rid in relation_ids(relation)
- for unit in related_units(rid) ]
-
-def unprovision_local_metadata():
- relation = relation_type()
- if relation and not remote_unit():
- return
- api_ip = config.previous("contrail-api-ip")
- api_port = None
- if api_ip:
- api_port = config.previous("contrail-api-port")
- if api_port is None:
- api_port = 8082
- elif relation == "contrail-api":
- api_ip = gethostbyname(relation_get("private-address"))
- api_port = relation_get("port")
- else:
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- relation_get("port", unit, rid))
- for rid in relation_ids("contrail-api")
- for unit in related_units(rid) ][0]
- user = None
- password = None
- if relation == "identity-admin":
- user = relation_get("service_username")
- password = relation_get("service_password")
- else:
- user, password = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Unprovisioning local metadata service 127.0.0.1:8775")
- contrail_provision_linklocal(api_ip, api_port, "metadata",
- "169.254.169.254", 80, "127.0.0.1", 8775,
- "del", user, password)
-
-def unprovision_vrouter():
- relation = relation_type()
- if relation and not remote_unit():
- return
- hostname = gethostname()
- ip = netifaces.ifaddresses("vhost0")[netifaces.AF_INET][0]["addr"]
- api_ip = config.previous("contrail-api-ip")
- api_port = None
- if api_ip:
- api_port = config.previous("contrail-api-port")
- if api_port is None:
- api_port = 8082
- elif relation == "contrail-api":
- api_ip = gethostbyname(relation_get("private-address"))
- api_port = relation_get("port")
- else:
- api_ip, api_port = [ (gethostbyname(relation_get("private-address", unit, rid)),
- relation_get("port", unit, rid))
- for rid in relation_ids("contrail-api")
- for unit in related_units(rid) ][0]
- user = None
- password = None
- tenant = None
- if relation == "identity-admin":
- user = relation_get("service_username")
- password = relation_get("service_password")
- tenant = relation_get("service_tenant_name")
- else:
- user, password, tenant = [ (relation_get("service_username", unit, rid),
- relation_get("service_password", unit, rid),
- relation_get("service_tenant_name", unit, rid))
- for rid in relation_ids("identity-admin")
- for unit in related_units(rid) ][0]
- log("Unprovisioning vrouter {}".format(ip))
- contrail_provision_vrouter(hostname, ip, api_ip, api_port, "del",
- user, password, tenant)
-
-def vhost_gateway():
- # determine vhost gateway
- gateway = config.get("vhost-gateway")
- if gateway == "auto":
- for line in check_output(["route", "-n"]).splitlines()[2:]:
- l = line.split()
- if "G" in l[3] and l[7] == "vhost0":
- return l[1]
- gateway = None
- return gateway
-
-def vhost_ip(iface):
- # return a vhost formatted address and mask - x.x.x.x/xx
- addr = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]
- ip = addr["addr"]
- cidr = netaddr.IPNetwork(ip + "/" + addr["netmask"]).prefixlen
- return ip + "/" + str(cidr)
-
-def vhost_phys():
- # run external script to determine physical interface of vhost0
- return check_output(["scripts/vhost-phys.sh"]).rstrip()
-
-def vrouter_ctx():
- return { "vhost_ip": vhost_ip("vhost0"),
- "vhost_gateway": vhost_gateway(),
- "vhost_physical": vhost_phys() }
-
-def vrouter_vgw_ctx():
- ctx = {}
- vgws = config.get("virtual-gateways")
- if vgws:
- vgws = yaml.safe_load(vgws)
- map(lambda item: item.update(domain="default-domain"), vgws)
- ctx["vgws"] = vgws
- return ctx
-
-def write_barbican_auth_config():
- ctx = identity_admin_ctx()
- render("contrail-barbican-auth.conf",
- "/etc/contrail/contrail-barbican-auth.conf", ctx, "root", "contrail",
- 0440)
-
-def write_nodemgr_config():
- ctx = contrail_discovery_ctx()
- render("contrail-vrouter-nodemgr.conf",
- "/etc/contrail/contrail-vrouter-nodemgr.conf", ctx)
-
-def write_vnc_api_config():
- ctx = {}
- ctx.update(contrail_api_ctx())
- ctx.update(identity_admin_ctx())
- render("vnc_api_lib.ini", "/etc/contrail/vnc_api_lib.ini", ctx)
-
-def write_vrouter_config():
- ctx = {}
- ctx.update(control_node_ctx())
- ctx.update(contrail_discovery_ctx())
- ctx.update(neutron_metadata_ctx())
- ctx.update(network_ctx())
- ctx.update(vrouter_ctx())
- ctx.update(vrouter_vgw_ctx())
- render("contrail-vrouter-agent.conf",
- "/etc/contrail/contrail-vrouter-agent.conf", ctx, perms=0440)
-
-def write_vrouter_vgw_interfaces():
- ctx = vrouter_vgw_ctx()
- render("vrouter-vgw.cfg", "/etc/network/interfaces.d/vrouter-vgw.cfg", ctx)
diff --git a/charms/trusty/neutron-contrail/hooks/start b/charms/trusty/neutron-contrail/hooks/start
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/stop b/charms/trusty/neutron-contrail/hooks/stop
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/hooks/upgrade-charm b/charms/trusty/neutron-contrail/hooks/upgrade-charm
deleted file mode 120000
index 930b3fb..0000000
--- a/charms/trusty/neutron-contrail/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-neutron_contrail_hooks.py \ No newline at end of file
diff --git a/charms/trusty/neutron-contrail/icon.svg b/charms/trusty/neutron-contrail/icon.svg
deleted file mode 100644
index 6f77c1a..0000000
--- a/charms/trusty/neutron-contrail/icon.svg
+++ /dev/null
@@ -1,309 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.91 r13725"
- sodipodi:docname="icon.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="4.0745362"
- inkscape:cx="48.413329"
- inkscape:cy="49.018169"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1920"
- inkscape:window-height="1025"
- inkscape:window-x="0"
- inkscape:window-y="27"
- inkscape:window-maximized="1"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#ebebeb;fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACE YOUR PICTOGRAM HERE"
- style="display:inline">
- <g
- style="display:inline"
- transform="matrix(0.30759127,0,0,0.30759127,8.28218,8.97257)"
- id="g3732">
- <path
- style="fill:#a3cfe8"
- d="M 95,165.62616 C 84.317392,162.68522 76.316695,156.3432 71.320441,146.85577 68.731857,141.94027 68.5,140.61329 68.5,130.71353 c 0,-11.83269 0.397793,-12.66977 6.034392,-12.69822 C 78.926707,117.99315 81,121.97863 81,130.44413 c 0,9.5666 3.34886,15.50194 11.662711,20.67036 3.651393,2.26995 4.798754,2.40131 23.683989,2.71173 l 19.8467,0.32623 -0.71218,2.17377 c -0.91082,2.78009 -0.90418,5.58369 0.0199,8.42378 l 0.73211,2.25 -18.36663,-0.0675 C 106.56201,166.89096 97.76974,166.38867 95,165.62616 Z m 46.00868,-0.11571 c -1.77687,-2.14099 -1.82625,-7.82041 -0.0862,-9.917 1.07681,-1.29747 3.57513,-1.59374 13.45,-1.595 9.54779,-0.001 12.86912,-0.37349 15.61365,-1.75 9.3963,-4.71272 7.35301,-19.21115 -2.93942,-20.85698 -2.07398,-0.33164 -4.19534,-0.89289 -4.71413,-1.24723 -0.51879,-0.35433 -1.44954,-3.43526 -2.06833,-6.84652 -1.37797,-7.59639 -3.48916,-12.20669 -7.30276,-15.94738 -3.66382,-3.59378 -3.6595,-4.21104 0.0385,-5.50018 2.54055,-0.88564 3,-1.56686 3,-4.447985 0,-4.258462 1.35388,-4.297632 5.25974,-0.152175 4.55275,4.83203 8.57589,11.55276 10.42257,17.41111 1.15326,3.65858 2.26012,5.35908 3.72889,5.72883 3.21482,0.8093 9.54053,7.29049 11.64977,11.9361 2.26213,4.98232 2.53846,14.30356 0.56413,19.02881 -1.97355,4.72336 -7.28419,10.42159 -12.03042,12.90844 -3.50369,1.8358 -6.19345,2.20312 -18.636,2.54499 -12.76506,0.35072 -14.7134,0.19219 -15.95,-1.29783 z M 36.760565,161.75 c -3.478655,-4.56459 -7.187084,-12.21027 -9.336932,-19.25 -2.778434,-9.09804 -2.583706,-24.94034 0.417306,-33.95043 3.497444,-10.500559 9.898641,-21.56636 12.457102,-21.534693 0.661077,0.0082 2.925911,1.473635 5.032964,3.256562 l 3.831004,3.241685 -2.568452,5.113673 C 42.599304,106.57918 40.65102,115.46967 40.594928,126 c -0.0579,10.86969 1.439444,17.99787 5.535634,26.35262 1.578191,3.21895 2.85983,6.14395 2.848087,6.5 C 48.949775,159.72808 41.428955,165 40.208913,165 c -0.534344,0 -2.086101,-1.4625 -3.448348,-3.25 z m 175.995035,-0.0376 -3.7444,-3.21245 1.79249,-3 c 8.93434,-14.95294 
9.53034,-38.50427 1.41338,-55.849827 l -3.07866,-6.578941 4.1278,-3.035616 C 215.5365,88.366027 217.71535,87 218.10811,87 c 1.50502,0 6.33619,6.757331 8.97827,12.55785 7.79191,17.10669 7.87368,37.40315 0.21328,52.94215 -2.91602,5.91511 -7.82715,12.49548 -9.29966,12.46052 -0.825,-0.0196 -3.18498,-1.48122 -5.2444,-3.24807 z M 81.482645,115.96644 c -1.483807,-2.86937 -1.949857,-3.10137 -5.058516,-2.51818 -4.663007,0.87478 -4.493442,-0.95188 0.628511,-6.77072 5.256509,-5.97171 14.327595,-10.460488 22.924736,-11.34418 4.557714,-0.468483 7.786604,-1.496091 10.894994,-3.467375 10.33444,-6.553906 24.98246,-8.287165 35.62763,-4.215718 4.82222,1.84435 5,2.051462 5,5.824988 0,3.32368 -0.46902,4.186565 -3.11582,5.732379 -2.93452,1.713856 -3.47765,1.727036 -9.3345,0.226582 -5.19732,-1.331492 -7.06708,-1.394156 -11.38418,-0.381538 -6.35168,1.489842 -8.08332,2.337822 -13.18203,6.455152 -3.63495,2.93531 -4.49954,3.19704 -9.10062,2.75494 -6.189167,-0.59471 -12.218344,1.78693 -18.196739,7.18806 l -4.06908,3.67616 -1.634386,-3.16055 z"
- id="path3746"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#9a9a9c"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 35.40716,159.29417 c -2.083023,-3.13821 -5.109308,-9.54119 -6.725077,-14.22886 -2.485242,-7.21018 -2.938617,-10.06664 -2.943307,-18.54417 -0.0036,-6.59373 0.591734,-12.07325 1.74079,-16.02114 2.125307,-7.30206 7.833992,-18.506493 10.893586,-21.380833 l 2.245692,-2.109718 4.114129,3.025565 4.114129,3.025564 -2.940589,6.48533 c -7.687874,16.955242 -7.684823,36.645922 0.0082,53.085582 l 2.95122,6.30662 -3.826883,3.03094 C 42.934289,163.63607 40.758205,165 40.203333,165 c -0.554872,0 -2.71315,-2.56762 -4.796173,-5.70583 z m 178.33231,2.91881 c -4.12643,-2.97696 -4.12127,-2.77305 -0.30142,-11.89827 C 216.73845,142.43037 218,135.70645 218,126 c 0,-9.70412 -1.26117,-16.4284 -4.56034,-24.31471 -1.42316,-3.401907 -2.66678,-6.795138 -2.76361,-7.540509 -0.0968,-0.74537 1.55376,-2.77037 3.66797,-4.5 L 218.18803,86.5 l 2.46357,3 c 10.21069,12.43401 
14.79345,33.98475 10.72523,50.43611 -2.37412,9.60065 -10.56942,25.165 -13.17772,25.02687 -0.38451,-0.0204 -2.39135,-1.25787 -4.45964,-2.75 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3744"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#50a1d2"
- d="m 93.286039,164.54925 c -16.494387,-5.15489 -26.958648,-21.00658 -24.875196,-37.68196 0.843223,-6.74892 1.329136,-7.48226 5.337762,-8.05574 4.602358,-0.65842 6.634722,2.66079 6.356138,10.38072 -0.355642,9.8553 5.007342,19.02839 13.395257,22.91187 3.449975,1.59728 6.65053,1.85496 23.27568,1.8739 l 19.27568,0.022 -1.5223,2.9438 c -1.13702,2.19876 -1.27006,3.60722 -0.52568,5.5651 0.54814,1.44171 0.99662,2.817 0.99662,3.0562 0,1.13237 -37.784447,0.21221 -41.713961,-1.01585 z M 140.3757,163.25 c -0.75749,-2.06167 -0.6343,-3.56348 0.49217,-6 l 1.50255,-3.25 12.9105,0 c 14.6294,0 17.5288,-0.97189 20.29597,-6.80328 3.45454,-7.27989 -1.32251,-15.43619 -9.78395,-16.70506 l -4.53221,-0.67965 -0.51854,-5.71858 c -0.55357,-6.10485 -4.15117,-14.35103 -7.6341,-17.49842 -2.70447,-2.44391 -2.6528,-3.02579 0.39191,-4.41306 1.58875,-0.72388 2.50558,-1.96702 2.51531,-3.410511 0.008,-1.249292 0.39216,-2.865775 0.85274,-3.592185 C 158.67512,92.329247 172,111.55317 172,117.01025 c 0,0.94756 2.19487,3.0552 4.99312,4.79469 16.07824,9.99478 15.53196,32.74917 -0.99499,41.44506 -5.0138,2.63808 -5.82451,2.75 -19.91928,2.75 l -14.69277,0 -1.01038,-2.75 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 
-0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.841186,115.55079 c -0.878315,-1.9277 -1.99166,-2.51327 -5.228562,-2.75 L 72.5,112.5 77.225927,107.42203 C 83.456988,100.72681 89.946931,97.312559 99.091117,95.919125 103.166,95.298175 107.175,94.376154 108,93.87019 c 0.825,-0.505965 4.40457,-2.344245 7.95461,-4.085068 8.22915,-4.035307 19.81365,-4.987772 28.27907,-2.325071 7.55962,2.37779 7.79351,2.597566 7.12811,6.697941 C 150.57502,99.006294 146.1878,101.20891 141,99.36016 132.99683,96.508113 122.06502,98.684599 115.29736,104.47747 111.53712,107.6961 110.64067,108 104.90676,108 97.846719,108 92.517648,110.09663 87.188282,114.97101 85.366837,116.63695 83.669689,118 83.416843,118 c -0.252846,0 -0.961892,-1.10215 -1.575657,-2.44921 z"
- id="path3742"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#258bc8"
- d="m 140.94241,163.34852 c -0.60534,-1.59216 -0.6633,-3.68963 -0.14507,-5.25 0.8603,-2.5903 0.90545,-2.60011 14.28284,-3.09996 7.93908,-0.29664 14.30706,-1.00877 15.59227,-1.74367 10.44037,-5.96999 7.38458,-21.04866 -4.67245,-23.05598 l -4.5,-0.74919 -0.58702,-5.97486 c -0.62455,-6.35693 -3.09323,-12.09225 -7.29978,-16.95905 l -2.57934,-2.98419 2.20484,-0.81562 c 2.73303,-1.01102 3.71477,-2.49335 3.78569,-5.716 0.0511,-2.322172 0.38375,-2.144343 4.67651,2.5 4.32664,4.681 10.2991,15.64731 10.2991,18.91066 0,0.80001 0.94975,1.756 2.11054,2.12443 3.25146,1.03197 9.8171,7.40275 11.96188,11.60686 2.54215,4.98304 2.56222,14.86412 0.0414,20.41386 -2.26808,4.99343 -8.79666,10.73297 -13.97231,12.28363 C 170.01108,165.47775 162.34653,166 155.10923,166 l -13.15873,0 -1.00809,-2.65148 z M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.664567,115.0093 c -1.516672,-2.56752 -2.095101,-2.81369 -5.364599,-2.28313 l -3.66463,0.59469 2.22168,-3.12006 C 80.37626,102.44974 90.120126,97.000633 99.857357,96.219746 105.13094,95.796826 107.53051,95.01192 111.5,92.411404 c 10.08936,-6.609802 24.47284,-8.157994 35.30015,-3.799597 4.05392,1.631857 
4.28296,1.935471 4,5.302479 -0.41543,4.943233 -3.85308,6.604794 -10.30411,4.980399 -9.07108,-2.284124 -18.26402,-0.195093 -26.41897,6.003525 -2.78485,2.11679 -4.55576,2.61322 -9.5,2.66311 -6.674981,0.0673 -12.069467,2.29808 -17.866999,7.38838 l -3.345536,2.93742 -1.699968,-2.87782 z"
- id="path3740"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#6c6d71"
- d="M 36.924699,160.79198 C 33.485946,156.10457 30.687068,150.24942 28.180767,142.5 c -2.22154,-6.86895 -2.214797,-26.11727 0.01161,-33.13024 2.21057,-6.96308 6.348289,-15.18965 9.611074,-19.108624 L 40.5,87.022271 l 3.875471,3.282759 3.875472,3.282758 -2.18708,4.287031 c -7.653476,15.002051 -8.071995,38.329351 -0.968739,53.995241 3.168854,6.98876 3.078371,7.44609 -2.21963,11.2186 l -2.802135,1.99529 -3.14866,-4.29197 z m 177.289621,1.13424 -4.17969,-3.07377 1.95557,-3.83324 c 5.55817,-10.89491 7.78283,-24.62144 6.0729,-37.4708 -0.61859,-4.64838 -1.81396,-10.16088 -2.65638,-12.25 -1.54072,-3.82085 -4.3711,-10.259911 -5.02182,-11.424556 -0.6119,-1.095168 7.44846,-6.09488 8.63936,-5.35886 2.42142,1.496519 8.05598,11.676956 10.60291,19.157176 3.82818,11.24317 3.81121,25.44418 -0.044,36.82783 -2.07525,6.12777 -9.78971,20.5 -11.00362,20.5 -0.10204,0 -2.06639,-1.3832 -4.36522,-3.07378 z M 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3738"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0076c2"
- d="m 81.778822,114.41391 c -0.987352,-2.167 -1.713119,-2.52365 -4.478561,-2.2008 C 75.485117,112.42502 74,112.28006 74,111.89098 c 0,-0.38909 2.038348,-2.80473 4.529662,-5.36811 5.687016,-5.85151 13.385461,-9.421936 22.389748,-10.384041 4.19603,-0.448345 7.72119,-1.408591 8.81929,-2.402352 1.0061,-0.910509 4.51398,-2.848867 7.79529,-4.307463 11.5167,-5.119364 33.48865,-2.808232 33.4507,3.51853 -0.03,5.002939 -4.29101,7.838526 -9.20479,6.125573 -1.69309,-0.590214 -6.0487,-1.063234 -9.67912,-1.051155 -7.46196,0.02483 -12.78325,2.004318 -18.21979,6.777668 -3.02474,2.65576 -4.03125,2.9899 -7.5746,2.51464 -5.45614,-0.73182 -12.97717,1.85611 -18.074646,6.21936 -2.22732,1.9065 -4.325286,3.46637 -4.662147,3.46637 -0.336861,0 -1.14271,-1.16374 -1.790775,-2.58609 z"
- id="path3736"
- inkscape:connector-curvature="0" />
- <path
- style="fill:#0275bc"
- d="m 84,115.94098 c 0,-0.58246 -0.519529,-0.73793 -1.154508,-0.34549 -0.691266,0.42723 -0.883989,0.27582 -0.48031,-0.37735 0.370809,-0.59998 1.542397,-1.02548 2.603528,-0.94554 1.457446,0.10978 1.667267,0.4611 0.857865,1.43636 C 84.525185,117.27704 84,117.34375 84,115.94098 Z m 0.09671,-3.86005 c -1.011759,-0.64056 -0.689769,-0.84554 1.15404,-0.73469 1.406534,0.0846 2.348958,0.49126 2.094276,0.90376 -0.60193,0.97493 -1.516575,0.92732 -3.248316,-0.16907 z m 6.3078,-0.92642 c 0.398903,-0.64544 0.136326,-1.16792 -0.595491,-1.18492 -0.765174,-0.0178 -0.541923,-0.47628 0.537358,-1.10362 1.338377,-0.77794 2.163776,-0.75328 3,0.0896 0.874885,0.8819 0.691151,0.98669 -0.76042,0.43369 -1.280472,-0.48782 -1.688838,-0.3648 -1.233688,0.37165 0.374196,0.60547 0.153488,1.42647 -0.490464,1.82445 -0.731227,0.45192 -0.902922,0.29014 -0.457295,-0.4309 z M 78.5,109.91171 l -3,-0.7763 3.217276,0.16818 c 2.186877,0.11431 3.688589,-0.46785 4.688882,-1.81771 1.457369,-1.96667 1.489127,-1.96706 3.282724,-0.0406 1.583464,1.70072 1.591856,1.78019 0.06676,0.63224 -1.483392,-1.11656 -2.007002,-1.0195 -3.5,0.64877 -1.381497,1.54369 -2.394984,1.79632 -4.755647,1.18547 z M 78.5,107 c -0.60158,-0.97338 0.120084,-1.39478 1.85526,-1.08333 1.302991,0.23387 3.690445,-2.0337 3.117418,-2.96088 -0.277916,-0.44968 0.02157,-1.14322 0.665519,-1.5412 0.731227,-0.45192 0.902922,-0.29014 0.457295,0.4309 -1.008441,1.63169 1.517118,1.38391 3.845638,-0.37729 1.067621,-0.80751 2.867621,-1.42334 4,-1.36852 2.027174,0.0981 2.02808,0.11053 0.05887,0.80463 -4.600356,1.62151 -9.243399,4.08158 -10.452051,5.53791 C 80.556518,108.23929 79.380215,108.42422 78.5,107 Z m 12.25,-0.66228 c 0.6875,-0.27741 1.8125,-0.27741 2.5,0 0.6875,0.27741 0.125,0.50439 -1.25,0.50439 -1.375,0 -1.9375,-0.22698 -1.25,-0.50439 z m -1.953895,-1.90746 c 1.232615,-0.86336 3.020243,-1.36556 3.972506,-1.116 1.314258,0.34442 1.203531,0.48168 -0.459594,0.56974 -1.205041,0.0638 -2.469098,0.566 -2.809017,1.116 -0.339919,0.55 -1.141604,1 -1.781523,1 
-0.639919,0 -0.154987,-0.70638 1.077628,-1.56974 z m 12.467645,-0.14784 c 1.52006,-0.22986 3.77006,-0.22371 5,0.0136 1.22994,0.23736 -0.0138,0.42542 -2.76375,0.41792 -2.75,-0.008 -3.756313,-0.20172 -2.23625,-0.43157 z m 13.52519,-3.66627 c 1.62643,-1.858573 1.61751,-1.921032 -0.18038,-1.262823 -1.58361,0.579759 -1.69145,0.451477 -0.6626,-0.788214 0.96581,-1.163733 1.50975,-1.222146 2.54116,-0.272892 0.80101,0.737212 0.96515,1.63324 0.42127,2.299789 -0.49007,0.6006 -0.69137,1.29168 -0.44733,1.53571 0.24403,0.24404 -0.41735,0.44371 -1.46974,0.44371 -1.81559,0 -1.82594,-0.1 -0.20238,-1.95528 z m -13.35766,0.48689 c 1.8068,-0.70764 6.56872,-0.33535 6.56872,0.51354 0,0.21088 -1.9125,0.35179 -4.25,0.31313 -3.00669,-0.0497 -3.68502,-0.29156 -2.31872,-0.82667 z M 120,98.984687 c -1.33333,-0.875277 -1.33333,-1.094097 0,-1.969374 0.825,-0.541578 2.175,-0.939378 3,-0.883999 0.99463,0.06677 0.88566,0.259531 -0.32343,0.572152 -1.07213,0.27721 -1.60009,1.05346 -1.28138,1.883999 0.63873,1.664515 0.5666,1.685055 -1.39519,0.397222 z m 23.8125,0.332199 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z M 100,98.073324 c 0,-0.509672 -0.7875,-1.132471 -1.75,-1.383998 -1.31691,-0.344145 -1.19317,-0.486031 0.5,-0.573325 1.2375,-0.0638 2.25,0.305488 2.25,0.820641 0,0.515152 1.4625,1.118136 3.25,1.339962 3.19982,0.397095 3.1921,0.405793 -0.5,0.563359 -2.0625,0.08802 -3.75,-0.256967 -3.75,-0.766639 z m 29.75,-0.79672 c 1.7875,-0.221826 4.7125,-0.221826 6.5,0 1.7875,0.221827 0.325,0.403322 -3.25,0.403322 -3.575,0 -5.0375,-0.181495 -3.25,-0.403322 z M 142.5,97 c -1.75921,-0.755957 -1.6618,-0.867892 0.80902,-0.929715 1.63221,-0.04084 2.5501,0.348653 2.19098,0.929715 -0.33992,0.55 -0.70398,0.968372 -0.80902,0.929715 C 144.58594,97.891058 143.6,97.472686 142.5,97 Z m -32.85536,-1.199796 c 0.45361,-0.715112 0.83163,-1.600204 0.84005,-1.966871 0.008,-0.366666 0.42496,-1.041666 
0.92564,-1.5 0.52889,-0.484163 0.60891,-0.309578 0.19098,0.416667 -0.93393,1.62288 0.27843,1.533702 3.39869,-0.25 2.99559,-1.712435 4,-1.837986 4,-0.5 0,0.55 -0.56916,1 -1.26481,1 -0.69564,0 -2.98616,0.922592 -5.09004,2.050204 -2.18676,1.172033 -3.47198,1.493283 -3.00051,0.75 z M 147,95.559017 C 147,94.701558 147.45,94 148,94 c 0.55,0 1,0.423442 1,0.940983 0,0.517541 -0.45,1.219098 -1,1.559017 -0.55,0.339919 -1,-0.08352 -1,-0.940983 z M 116.5,95 c 0.33992,-0.55 1.04148,-1 1.55902,-1 0.51754,0 0.94098,0.45 0.94098,1 0,0.55 -0.70156,1 -1.55902,1 -0.85746,0 -1.2809,-0.45 -0.94098,-1 z m 8.5,0.185596 c 0,-1.012848 13.57404,-0.944893 14.59198,0.07305 C 139.99972,95.666391 136.88333,96 132.66667,96 128.45,96 125,95.633518 125,95.185596 Z M 150.15789,94 c 0,-1.375 0.22698,-1.9375 0.50439,-1.25 0.27741,0.6875 0.27741,1.8125 0,2.5 -0.27741,0.6875 -0.50439,0.125 -0.50439,-1.25 z M 120.75,93.337719 c 0.6875,-0.277412 1.8125,-0.277412 2.5,0 0.6875,0.277413 0.125,0.504386 -1.25,0.504386 -1.375,0 -1.9375,-0.226973 -1.25,-0.504386 z m 21.51903,-0.03071 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 -1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z M 126,91.822487 c 0,-1.159476 11.18403,-0.998163 13,0.187505 1.04165,0.680102 -0.71538,0.92675 -5.75,0.807174 C 129.2625,92.722461 126,92.274855 126,91.822487 Z M 147,92 c 0,-0.55 0.45,-1 1,-1 0.55,0 1,0.45 1,1 0,0.55 -0.45,1 -1,1 -0.55,0 -1,-0.45 -1,-1 z m -22.5,-2.531662 c 5.25889,-1.588265 12.55323,-1.437163 18.5,0.383229 3.35111,1.025823 3.2873,1.051779 -1.5,0.610174 -8.02324,-0.740105 -13.71413,-0.773698 -18,-0.106252 -3.61325,0.562697 -3.51656,0.476921 1,-0.887151 z m -1.6875,-2.151452 c 0.72187,-0.288871 1.58437,-0.253344 1.91667,0.07895 0.33229,0.332292 -0.25834,0.568641 -1.3125,0.52522 -1.16495,-0.04798 -1.4019,-0.284941 -0.60417,-0.604167 z m 8.45653,-1.009877 c 0.97297,-0.253543 2.32297,-0.236869 3,0.03705 0.67703,0.273923 -0.11903,0.481368 -1.76903,0.460988 
-1.65,-0.02038 -2.20394,-0.244498 -1.23097,-0.498042 z"
- id="path3734"
- inkscape:connector-curvature="0" />
- </g>
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 a 12,12 0 0 1 -12,12 12,12 0 0 1 -12,-12 12,12 0 0 1 12,-12 12,12 0 0 1 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/neutron-contrail/metadata.yaml b/charms/trusty/neutron-contrail/metadata.yaml
deleted file mode 100644
index fff4a7d..0000000
--- a/charms/trusty/neutron-contrail/metadata.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-name: neutron-contrail
-summary: OpenStack Neutron OpenContrail Agent
-maintainer: Robert Ayres <robert.ayres@ubuntu.com>
-description: |
- Neutron is a virtual network service for Openstack, and a part of
- Netstack. Just like OpenStack Nova provides an API to dynamically
- request and configure virtual servers, Neutron provides an API to
- dynamically request and configure virtual networks. These networks
- connect "interfaces" from other OpenStack services (e.g., virtual NICs
- from Nova VMs). The Neutron API supports extensions to provide
- advanced network capabilities (e.g., QoS, ACLs, network monitoring,
- etc.)
- .
- This charm provides the OpenStack Neutron OpenContrail agent, managing
- L2 connectivity on nova-compute services.
-categories:
- - openstack
-subordinate: true
-peers:
- cluster:
- interface: contrail-cluster
-provides:
- neutron-plugin:
- interface: neutron-plugin
- scope: container
-requires:
- container:
- interface: juju-info
- scope: container
- contrail-api:
- interface: contrail-api
- contrail-discovery:
- interface: contrail-discovery
- control-node:
- interface: contrail-control
- identity-admin:
- interface: keystone-admin
- neutron-metadata:
- interface: neutron-metadata
diff --git a/charms/trusty/neutron-contrail/scripts/create-vrouter.sh b/charms/trusty/neutron-contrail/scripts/create-vrouter.sh
deleted file mode 100755
index 686a7b7..0000000
--- a/charms/trusty/neutron-contrail/scripts/create-vrouter.sh
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/sh -e
-#
-# Script used to configure vRouter interface
-
-configVRouter()
-{
- cat juju-header
- echo "auto $1"
- if [ -e "$2" ]; then
- cat "$2"
- else
- echo "iface $1 inet manual"
- fi
- printf "\n%s\n" "auto vhost0"
- if [ -e "$3" ]; then
- cat "$3"
- else
- echo "iface vhost0 inet dhcp"
- fi
- cat <<-EOF
- pre-up ip link add address \$(cat /sys/class/net/$1/address) type vhost
- pre-up vif --add $1 --mac \$(cat /sys/class/net/$1/address) --vrf 0 --vhost-phys --type physical
- pre-up vif --add vhost0 --mac \$(cat /sys/class/net/$1/address) --vrf 0 --type vhost --xconnect $1
- post-down vif --list | awk '/^vif.*OS: vhost0/ {split(\$1, arr, "\\/"); print arr[2];}' | xargs vif --delete
- post-down vif --list | awk '/^vif.*OS: $1/ {split(\$1, arr, "\\/"); print arr[2];}' | xargs vif --delete
- post-down ip link delete vhost0
- EOF
-}
-
-ifacedown()
-{
- for iface; do
- # ifdown interface
- # if bridge, save list of interfaces
- # if bond, save list of slaves
- if [ ! -e /sys/class/net/$iface ]; then
- continue
- fi
- [ -d /sys/class/net/$iface/bridge ] && saveIfaces $iface
- [ -d /sys/class/net/$iface/bonding ] && saveSlaves $iface
- ifdown --force $iface
- done
-}
-
-ifaceup()
-{
- for iface; do
- # ifup interface
- # if bridge, restore list of interfaces
- # restore list of slaves if exists (bond)
- restoreSlaves $iface
- ifup $iface
- [ -d /sys/class/net/$iface/bridge ] && restoreIfaces $iface
- done
- return 0
-}
-
-restoreIfaces()
-{
- if [ -e $TMP/$1.ifaces ]; then
- cat $TMP/$1.ifaces | xargs -n 1 brctl addif $1 || true
- fi
-}
-
-restoreSlaves()
-{
- if [ -e $TMP/$1.slaves ]; then
- cat $TMP/$1.slaves | xargs ifup
- fi
-}
-
-saveIfaces()
-{
- if [ -z "$(find /sys/class/net/$1/brif -maxdepth 0 -empty)" ]; then
- find /sys/class/net/$1/brif | tail -n +2 | xargs -n 1 basename \
- > $TMP/$1.ifaces
- fi
-}
-
-saveSlaves()
-{
- if [ -s /sys/class/net/$1/bonding/slaves ]; then
- cat /sys/class/net/$1/bonding/slaves | tr " " "\n" \
- > $TMP/$1.slaves
- fi
-}
-
-TMP=$(mktemp -d /tmp/create-vrouter.XXX)
-
-if [ $# -ne 0 ]; then
- interface=$1
-else
- # use default gateway interface
- interface=$(route -n | awk '$1 == "0.0.0.0" { print $8 }')
-fi
-
-ifacedown $interface vhost0; sleep 5
-# add interfaces.d source line to /etc/network/interfaces
-if ! grep -q '^[[:blank:]]*source /etc/network/interfaces\.d/\*\.cfg[[:blank:]]*$' \
- /etc/network/interfaces; then
- printf "\n%s\n" "source /etc/network/interfaces.d/*.cfg" \
- >> /etc/network/interfaces
- # it's possible for conflicting network config to exist in
- # /etc/network/interfaces.d when we start sourcing it
- # so disable any config as a precautionary measure
- for cfg in /etc/network/interfaces.d/*.cfg; do
- [ -e "$cfg" ] || continue
- mv "$cfg" "$cfg.save"
- done
-fi
-mkdir -p /etc/network/interfaces.d
-for cfg in /etc/network/interfaces /etc/network/interfaces.d/*.cfg \
- /etc/network/*.config; do
- # for each network interfaces config, extract the config for
- # the chosen interface whilst commenting it out in the subsequent
- # replacement config
- [ -e "$cfg" ] || continue
- awk -v interface=$interface -v interface_cfg=$TMP/interface.cfg \
- -v vrouter_cfg=$TMP/vrouter.cfg -f vrouter-interfaces.awk "$cfg" \
- > $TMP/interfaces.cfg
- if ! diff $TMP/interfaces.cfg "$cfg" > /dev/null; then
- # create backup
- mv "$cfg" "$cfg.save"
- # substitute replacement config for original config
- cat juju-header $TMP/interfaces.cfg > "$cfg"
- fi
-done
-# use extracted interface config to create new vrouter config
-configVRouter $interface $TMP/interface.cfg $TMP/vrouter.cfg \
- > /etc/network/interfaces.d/vrouter.cfg
-ifaceup $interface vhost0
-
-rm -rf $TMP
diff --git a/charms/trusty/neutron-contrail/scripts/interfaces b/charms/trusty/neutron-contrail/scripts/interfaces
deleted file mode 100644
index 0356643..0000000
--- a/charms/trusty/neutron-contrail/scripts/interfaces
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
diff --git a/charms/trusty/neutron-contrail/scripts/juju-header b/charms/trusty/neutron-contrail/scripts/juju-header
deleted file mode 100644
index fccac13..0000000
--- a/charms/trusty/neutron-contrail/scripts/juju-header
+++ /dev/null
@@ -1,5 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
diff --git a/charms/trusty/neutron-contrail/scripts/remove-juju-bridge.sh b/charms/trusty/neutron-contrail/scripts/remove-juju-bridge.sh
deleted file mode 100755
index d9a9ec1..0000000
--- a/charms/trusty/neutron-contrail/scripts/remove-juju-bridge.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh -e
-#
-# Script used to remove Juju LXC bridge on MAAS systems
-
-if [ ! -e /sys/class/net/juju-br0 ]; then
- exit 0
-fi
-
-interface=$(find /sys/class/net/juju-br0/brif | sed -n -e '2p' | xargs basename)
-
-ifdown --force $interface juju-br0; sleep 5
-cp interfaces /etc/network
-cat <<-EOF >> /etc/network/interfaces
- auto $interface
- iface $interface inet dhcp
- EOF
-ifup $interface
diff --git a/charms/trusty/neutron-contrail/scripts/vhost-phys.sh b/charms/trusty/neutron-contrail/scripts/vhost-phys.sh
deleted file mode 100755
index 6565d1c..0000000
--- a/charms/trusty/neutron-contrail/scripts/vhost-phys.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh -e
-#
-# Script used to determine physical interface of vhost0
-
-mac=$(cat /sys/class/net/vhost0/address)
-vif --list | awk -v mac=$mac 'BEGIN { RS="\n\n" }; $3 != "vhost0" && $0 ~ "HWaddr:" mac { print $3; exit 0 }'
diff --git a/charms/trusty/neutron-contrail/scripts/vrouter-interfaces.awk b/charms/trusty/neutron-contrail/scripts/vrouter-interfaces.awk
deleted file mode 100644
index d8e5851..0000000
--- a/charms/trusty/neutron-contrail/scripts/vrouter-interfaces.awk
+++ /dev/null
@@ -1,38 +0,0 @@
-function strip(s)
-{
- sub(/^[[:blank:]]+/, "", s)
- sub(/[[:blank:]]+$/, "", s)
- return s
-}
-
-/^[[:blank:]]*(iface|mapping|auto|allow-[^ ]+|source) / {
- s_iface = 0; iface = 0
-}
-
-$0 ~ "^[[:blank:]]*auto (" interface "|vhost0)[[:blank:]]*$" { print "#" $0; next }
-
-$0 ~ "^[[:blank:]]*iface (" interface "|vhost0) " {
- s_iface = 1
- if ($2 == interface) {
- iface = 1
- print "iface", interface, $3, "manual" > interface_cfg
- print "iface vhost0", $3, $4 > vrouter_cfg
- }
- print "#" $0
- next
-}
-
-s_iface == 1 {
- if (iface == 1) {
- if (match($1, "^address|netmask|broadcast|metric|gateway$")) {
- cfg = vrouter_cfg
- } else {
- cfg = interface_cfg
- }
- print " " strip($0) > cfg
- }
- print "#" $0
- next
-}
-
-{ print $0 }
diff --git a/charms/trusty/neutron-contrail/templates/contrail-barbican-auth.conf b/charms/trusty/neutron-contrail/templates/contrail-barbican-auth.conf
deleted file mode 100644
index 84ab4a0..0000000
--- a/charms/trusty/neutron-contrail/templates/contrail-barbican-auth.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DEFAULT]
-auth_url = http://{{ auth_host }}:{{ auth_port }}/v2.0
-auth_version = 2
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
-admin_tenant_name = {{ admin_tenant_name }}
-region = {{ auth_region }}
-
diff --git a/charms/trusty/neutron-contrail/templates/contrail-vrouter-agent.conf b/charms/trusty/neutron-contrail/templates/contrail-vrouter-agent.conf
deleted file mode 100644
index 3543c2d..0000000
--- a/charms/trusty/neutron-contrail/templates/contrail-vrouter-agent.conf
+++ /dev/null
@@ -1,44 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-{%- if control_nodes %}
-
-[CONTROL-NODE]
-server = {{ control_nodes|join(" ") }}
-
-{%- endif %}
-{%- if discovery_server %}
-
-[DISCOVERY]
-server = {{ discovery_server }}
-
-{%- endif %}
-
-[METADATA]
-metadata_proxy_secret = {{ metadata_secret }}
-
-[NETWORKS]
-control_network_ip = {{ control_network_ip }}
-
-[VIRTUAL-HOST-INTERFACE]
-name = vhost0
-ip = {{ vhost_ip }}
-gateway = {{ vhost_gateway }}
-physical_interface = {{ vhost_physical }}
-
-{%- for vgw in vgws %}
-
-[GATEWAY-{{ loop.index0 }}]
-routing_instance = {{ vgw["domain"] }}:{{ vgw["project"] }}:{{ vgw["network"] }}:{{ vgw["network"] }}
-interface = {{ vgw["interface"] }}
-ip_blocks = {{ vgw["subnets"]|join(" ") }}
-routes = {{ vgw["routes"]|join(" ") }}
-
-{%- endfor %}
-
-[SERVICE-INSTANCE]
-netns_command = /usr/bin/opencontrail-vrouter-netns
-docker_command = /usr/bin/opencontrail-vrouter-docker
-
diff --git a/charms/trusty/neutron-contrail/templates/contrail-vrouter-nodemgr.conf b/charms/trusty/neutron-contrail/templates/contrail-vrouter-nodemgr.conf
deleted file mode 100644
index ffea798..0000000
--- a/charms/trusty/neutron-contrail/templates/contrail-vrouter-nodemgr.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[DISCOVERY]
-server = {{ discovery_server }}
-port = {{ discovery_port }}
-
diff --git a/charms/trusty/neutron-contrail/templates/vnc_api_lib.ini b/charms/trusty/neutron-contrail/templates/vnc_api_lib.ini
deleted file mode 100644
index fd68487..0000000
--- a/charms/trusty/neutron-contrail/templates/vnc_api_lib.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-[global]
-WEB_SERVER = {{ api_server }}
-WEB_PORT = {{ api_port }}
-
-[auth]
-AUTHN_TYPE = keystone
-AUTHN_PROTOCOL = http
-AUTHN_SERVER = {{ auth_host }}
-AUTHN_PORT = {{ auth_port }}
-AUTHN_URL = /v2.0/tokens
-
diff --git a/charms/trusty/neutron-contrail/templates/vrouter-vgw.cfg b/charms/trusty/neutron-contrail/templates/vrouter-vgw.cfg
deleted file mode 100644
index f5afd46..0000000
--- a/charms/trusty/neutron-contrail/templates/vrouter-vgw.cfg
+++ /dev/null
@@ -1,21 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# Configuration file maintained by Juju. Local changes may be overwritten.
-###############################################################################
-
-{%- for vgw in vgws %}
-{% set interface = vgw["interface"] %}
-auto {{ interface }}
-iface {{ interface }} inet manual
- pre-up vif --create {{ interface }} --mac 00:00:5e:00:01:00
-{%- for subnet in vgw["subnets"] %}
- post-up route add -net {{ subnet }} dev {{ interface }}
-{%- endfor %}
-{%- for subnet in vgw["subnets"] %}
- pre-down route del -net {{ subnet }} dev {{ interface }}
-{%- endfor %}
- post-down id=$(vif --list | awk '/vif[0-9\/]+[\t ]+ OS: {{ interface }}/ { split($1, arr, "/"); print arr[2]; }'); \
- { [ -n "$id" ] && vif --delete $id; } || true
- post-down ip link delete {{ interface }}
-{%- endfor %}
-
diff --git a/charms/trusty/zookeeper/.bzr/README b/charms/trusty/zookeeper/.bzr/README
deleted file mode 100644
index f82dc1c..0000000
--- a/charms/trusty/zookeeper/.bzr/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a Bazaar control directory.
-Do not change any files in this directory.
-See http://bazaar.canonical.com/ for more information about Bazaar.
diff --git a/charms/trusty/zookeeper/.bzr/branch-format b/charms/trusty/zookeeper/.bzr/branch-format
deleted file mode 100644
index 9eb09b7..0000000
--- a/charms/trusty/zookeeper/.bzr/branch-format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar-NG meta directory, format 1
diff --git a/charms/trusty/zookeeper/.bzr/branch/branch.conf b/charms/trusty/zookeeper/.bzr/branch/branch.conf
deleted file mode 100644
index 01ebfba..0000000
--- a/charms/trusty/zookeeper/.bzr/branch/branch.conf
+++ /dev/null
@@ -1 +0,0 @@
-parent_location = http://bazaar.launchpad.net/~charmers/charms/trusty/zookeeper/trunk/
diff --git a/charms/trusty/zookeeper/.bzr/branch/format b/charms/trusty/zookeeper/.bzr/branch/format
deleted file mode 100644
index dc392f4..0000000
--- a/charms/trusty/zookeeper/.bzr/branch/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Branch Format 7 (needs bzr 1.6)
diff --git a/charms/trusty/zookeeper/.bzr/branch/last-revision b/charms/trusty/zookeeper/.bzr/branch/last-revision
deleted file mode 100644
index 8c28e96..0000000
--- a/charms/trusty/zookeeper/.bzr/branch/last-revision
+++ /dev/null
@@ -1 +0,0 @@
-13 chuck@dasroot.net-20141031214845-9yvqu184ifre9jp4
diff --git a/charms/trusty/zookeeper/.bzr/branch/tags b/charms/trusty/zookeeper/.bzr/branch/tags
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/zookeeper/.bzr/branch/tags
+++ /dev/null
diff --git a/charms/trusty/zookeeper/.bzr/checkout/conflicts b/charms/trusty/zookeeper/.bzr/checkout/conflicts
deleted file mode 100644
index 0dc2d3a..0000000
--- a/charms/trusty/zookeeper/.bzr/checkout/conflicts
+++ /dev/null
@@ -1 +0,0 @@
-BZR conflict list format 1
diff --git a/charms/trusty/zookeeper/.bzr/checkout/dirstate b/charms/trusty/zookeeper/.bzr/checkout/dirstate
deleted file mode 100644
index d9658e8..0000000
--- a/charms/trusty/zookeeper/.bzr/checkout/dirstate
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/zookeeper/.bzr/checkout/format b/charms/trusty/zookeeper/.bzr/checkout/format
deleted file mode 100644
index e0261c7..0000000
--- a/charms/trusty/zookeeper/.bzr/checkout/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar Working Tree Format 6 (bzr 1.14)
diff --git a/charms/trusty/zookeeper/.bzr/checkout/views b/charms/trusty/zookeeper/.bzr/checkout/views
deleted file mode 100644
index e69de29..0000000
--- a/charms/trusty/zookeeper/.bzr/checkout/views
+++ /dev/null
diff --git a/charms/trusty/zookeeper/.bzr/repository/format b/charms/trusty/zookeeper/.bzr/repository/format
deleted file mode 100644
index b200528..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/format
+++ /dev/null
@@ -1 +0,0 @@
-Bazaar repository format 2a (needs bzr 1.16 or later)
diff --git a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.cix b/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.cix
deleted file mode 100644
index 8fe8b04..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.cix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.iix b/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.iix
deleted file mode 100644
index 5f4e20b..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.iix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.rix b/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.rix
deleted file mode 100644
index 10e1ba3..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.rix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.six b/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.six
deleted file mode 100644
index a2afde6..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.six
+++ /dev/null
@@ -1,5 +0,0 @@
-B+Tree Graph Index 2
-node_ref_lists=0
-key_elements=1
-len=0
-row_lengths=
diff --git a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.tix b/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.tix
deleted file mode 100644
index 3fc4172..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/indices/3c2f9bccb787d7e4c58ceea02b4606c0.tix
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/zookeeper/.bzr/repository/pack-names b/charms/trusty/zookeeper/.bzr/repository/pack-names
deleted file mode 100644
index 062d941..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/pack-names
+++ /dev/null
@@ -1,7 +0,0 @@
-B+Tree Graph Index 2
-node_ref_lists=0
-key_elements=1
-len=1
-row_lengths=1
-xœÁ±€ @j¦Èñ?Cb¨,,lÜÞ»÷{ò¼s®ºG[Ã#œÆ‹Ùã°ÈœÚ¼CZ
-H†lj6!Pnj¡ \ No newline at end of file
diff --git a/charms/trusty/zookeeper/.bzr/repository/packs/3c2f9bccb787d7e4c58ceea02b4606c0.pack b/charms/trusty/zookeeper/.bzr/repository/packs/3c2f9bccb787d7e4c58ceea02b4606c0.pack
deleted file mode 100644
index 6ae57de..0000000
--- a/charms/trusty/zookeeper/.bzr/repository/packs/3c2f9bccb787d7e4c58ceea02b4606c0.pack
+++ /dev/null
Binary files differ
diff --git a/charms/trusty/zookeeper/README.md b/charms/trusty/zookeeper/README.md
deleted file mode 100644
index 7842422..0000000
--- a/charms/trusty/zookeeper/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-ZooKeeper is a centralized service for maintaining configuration information,
-naming, providing distributed synchronization, and providing group services.
-All of these kinds of services are used in some form or another by distributed
-applications. Each time they are implemented there is a lot of work that goes
-into fixing the bugs and race conditions that are inevitable. Because of the
-difficulty of implementing these kinds of services, applications initially
-usually skimp on them ,which make them brittle in the presence of change and
-difficult to manage. Even when done correctly, different implementations of
-these services lead to management complexity when the applications are
-deployed.
diff --git a/charms/trusty/zookeeper/config.yaml b/charms/trusty/zookeeper/config.yaml
deleted file mode 100644
index ecb02ac..0000000
--- a/charms/trusty/zookeeper/config.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-options:
- zk-port:
- type: int
- default: 2181
- description: ZooKeeper Client Access Port
- source:
- type: string
- default: archive
- description: |
- Location and packages to install zookeeper:
- .
- * archive: Install using the zookeeperd package from the main
- Ubuntu Archive.
- * dev: Install using the hadoop-zookeeper-server package from
- ppa:hadoop-ubuntu/dev.
- * testing: Install using the hadoop-zookeeper-server package from
- ppa:hadoop-ubuntu/testing.
- * stable: Install using the hadoop-zookeeper-server package from
- ppa:hadoop-ubuntu/stable.
- .
- The packages provides in the hadoop-ubuntu team PPA's are based
- directly on upstream ZooKeeper releases but are not fully built from
- source.
- default_weight:
- type: int
- default: 1
- description: default weight
- default_group:
- type: int
- default: 0
- description: default group
- external_server:
- type: string
- default: ""
- description: |
- Extra servers ( external to juju ) to add
- to zoo.cnf. Format should be id:group:weight:host:port:port
- group can be defined as "default" as opposed to a number to use
- the default_group defined above
diff --git a/charms/trusty/zookeeper/copyright b/charms/trusty/zookeeper/copyright
deleted file mode 100644
index a6ddcf8..0000000
--- a/charms/trusty/zookeeper/copyright
+++ /dev/null
@@ -1,17 +0,0 @@
-Format: http://dep.debian.net/deps/dep5/
-
-Files: *
-Copyright: Copyright 2012, Canonical Ltd., All Rights Reserved.
-License: GPL-3
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/charms/trusty/zookeeper/hooks/install b/charms/trusty/zookeeper/hooks/install
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/quorum-relation-changed b/charms/trusty/zookeeper/hooks/quorum-relation-changed
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/quorum-relation-changed
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/quorum-relation-departed b/charms/trusty/zookeeper/hooks/quorum-relation-departed
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/quorum-relation-departed
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/quorum-relation-joined b/charms/trusty/zookeeper/hooks/quorum-relation-joined
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/quorum-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/start b/charms/trusty/zookeeper/hooks/start
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/start
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/stop b/charms/trusty/zookeeper/hooks/stop
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/stop
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/upgrade-charm b/charms/trusty/zookeeper/hooks/upgrade-charm
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/upgrade-charm
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/hooks/zookeeper-common b/charms/trusty/zookeeper/hooks/zookeeper-common
deleted file mode 100755
index 578d856..0000000
--- a/charms/trusty/zookeeper/hooks/zookeeper-common
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-# By default we install from the main Ubuntu archive.
-source=`config-get source`
-case $source in
- archive)
- juju-log "Configuring zookeeper using the Ubuntu archive packages..."
- zk_package="zookeeperd"
- zk_service="zookeeper"
- zk_conf="/etc/zookeeper/conf/zoo.cfg"
- zk_myid="/etc/zookeeper/conf/myid"
- zk_install="/etc/init/zookeeper.conf"
- ;;
- dev|testing|stable)
- juju-log "Configuring zookeeper using the Hadoop Ubuntu Team PPA..."
- zk_package="hadoop-zookeeper-server"
- zk_service="hadoop-zookeeper-server"
- zk_conf="/etc/hadoop-zookeeper/conf/zoo.cfg"
- zk_myid="/etc/hadoop-zookeeper/conf/myid"
- zk_install="/etc/init/hadoop-zookeeper-server.conf"
- ;;
- *)
- juju-log "Unsupported source..."
- exit 1
- ;;
-esac
-
-base_packages="ntp"
-zk_port="`config-get zk-port`"
-
-configure_sources () {
- source=`config-get source`
- case $source in
- archive)
- juju-log "Configuring zookeeper using the Ubuntu archive packages..."
- ;;
- dev|testing|stable)
- juju-log "Configuring zookeeper using the Hadoop Ubuntu Team PPA..."
- add-apt-repository ppa:hadoop-ubuntu/$source
- apt-get update
- ;;
- *)
- juju-log "Unsupported source..."
- exit 1
- ;;
- esac
-}
-
-install_zookeeper () {
- juju-log "Installing ZooKeeper..."
- apt-get install -y $zk_package $base_packages
-}
-
-configure_zookeeper () {
- juju-log "Purging any standalone configuration..."
- # Purge
- sed -i "/^server./d" $zk_conf
- sed -i "/^weight./d" $zk_conf
- sed -i "/^group./d" $zk_conf
- juju-log "Generating unique ID for this instance..."
- unit_no=`echo $JUJU_UNIT_NAME | cut -d / -f 2`
- default_weight=`config-get default_weight`
- default_group=`config-get default_group`
- echo $unit_no > $zk_myid
- juju-log "Adding this unit to the quorum..."
- hostname=`unit-get private-address`
- # Add the config
- echo "server.${unit_no}=${hostname}:2888:3888" >> $zk_conf
- echo "group.${default_group}=${unit_no}" >> $zk_conf
- echo "weight.${unit_no}=${default_weight}" >> $zk_conf
- # Expose port as required
- open-port $zk_port
-}
-
-# Service Control Commands
-restart_zookeeper () {
- juju-log "Restarting ZooKeeper"
- service $zk_service status && service $zk_service restart || :
-}
-
-start_zookeeper() {
- juju-log "Starting ZooKeeper"
- service $zk_service status || service $zk_service start
-}
-
-stop_zookeeper() {
- juju-log "Stopping ZooKeeper"
- service $zk_service stop || :
-}
-
-update_group() {
- member_id=$1
- [ -z ${member_id} ] && return 0
- default_group=`config-get default_group`
- group_arr=( $(grep "group.${default_group}" ${zk_conf} | awk -F'=' '{ print $2 }' | tr ":" "\n") )
- member_found="no"
- for member in "${group_arr[@]}"
- do
- [ "${member_id}" == "${member}" ] && member_found="yes"
- done
- [ "${member_found}" == "no" ] && group_arr=("${group_arr[@]}" "${member_id}")
- retVal="group.${default_group}=${group_arr[@]}"
- retVal=`echo ${retVal} | tr " " ":"`
- sed -i "s/^group.${default_group}.*/${retVal}/" ${zk_conf}
- sed -i "s/^group.${default_group}=:/group.${default_group}=/" ${zk_conf}
-}
-
-update_external_server() {
- # Add extra server ( if configured )
- external_server=`config-get external_server`
- if [[ "${external_server}" != "" ]]; then
- external_id=`echo ${external_server} | cut -d : -f 1`
- external_group=`echo ${external_server} | cut -d : -f 2`
- if [[ "${external_group}" == "default" ]]; then
- external_group=`config-get default_group`
- fi
- external_weight=`echo ${external_server} | cut -d : -f 3`
- external_host=`echo ${external_server} | cut -d : -f 4`
- external_port1=`echo ${external_server} | cut -d : -f 5`
- external_port2=`echo ${external_server} | cut -d : -f 6`
- sed -i "/^server.${external_id}/d" $zk_conf
- sed -i "/^weight.${external_id}/d" $zk_conf
- echo "server.${external_id}=${external_host}:${external_port1}:${external_port2}" >> $zk_conf
- update_group ${external_id}
- echo "weight.${external_id}=${external_weight}" >> $zk_conf
- open-port ${external_port1}
- open-port ${external_port2}
- fi
-}
-
-update_quorum() {
- default_group=`config-get default_group`
- default_weight=`config-get default_weight`
- # Purge out existing quorum config to deal with departure of
- # ZK nodes
- sed -i "/^server./d" $zk_conf
- sed -i "/^weight./d" $zk_conf
- sed -i "/^group./d" $zk_conf
- # Add this node back into the list
- juju-log "Adding this unit to the quorum..."
- unit_no=`echo $JUJU_UNIT_NAME | cut -d / -f 2`
- hostname=`unit-get private-address`
- server_arr[0]="server.${unit_no}=${hostname}:2888:3888"
- weight_arr[0]="weight.${unit_no}=${default_weight}"
- group_arr[0]=${unit_no}
- # Re-create based on current relations
- for member in `relation-list`
- do
- juju-log "Adding $member to quorum"
- member_id=`echo ${member} | cut -d / -f 2`
- member_hostname=`relation-get private-address ${member}`
- server_arr=("${server_arr[@]}" "server.${member_id}=${member_hostname}:2888:3888")
- weight_arr=("${weight_arr[@]}" "weight.${member_id}=${default_weight}")
- group_arr=("${group_arr[@]}" "${member_id}")
- done
- # Dump the new config
- # servers
- for server_line in "${server_arr[@]}"
- do
- echo "${server_line}" >> ${zk_conf}
- done
- # weight
- for member_line in "${weight_arr[@]}"
- do
- echo "${member_line}" >> $zk_conf
- done
- # group
- echo "group.${default_group}=${group_arr[@]}" | tr " " ":" >> $zk_conf
-}
-
-setup_zk_interface() {
- juju-log "Setup ZooKeeper Client"
- relation-set port=$zk_port
-}
-
-COMMAND=`basename $0`
-
-case $COMMAND in
- install)
- configure_sources
- install_zookeeper
- configure_zookeeper
- update_external_server
- restart_zookeeper
- ;;
- config-changed)
- update_external_server
- restart_zookeeper
- ;;
- start)
- start_zookeeper
- ;;
- stop)
- stop_zookeeper
- ;;
- quorum-relation-joined)
- ;;
- quorum-relation-changed|quorum-relation-departed)
- update_quorum
- update_external_server
- restart_zookeeper
- ;;
- zookeeper-relation-joined)
- setup_zk_interface
- ;;
- upgrade-charm)
- configure_zookeeper
- update_external_server
- restart_zookeeper
- ;;
- *)
- juju-log "Command not recognised"
- ;;
-esac
diff --git a/charms/trusty/zookeeper/hooks/zookeeper-relation-joined b/charms/trusty/zookeeper/hooks/zookeeper-relation-joined
deleted file mode 120000
index 91a1891..0000000
--- a/charms/trusty/zookeeper/hooks/zookeeper-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-zookeeper-common \ No newline at end of file
diff --git a/charms/trusty/zookeeper/icon.svg b/charms/trusty/zookeeper/icon.svg
deleted file mode 100644
index 6cbf74d..0000000
--- a/charms/trusty/zookeeper/icon.svg
+++ /dev/null
@@ -1,292 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="96"
- height="96"
- id="svg6517"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="juju-icon-placeholder-z.svg">
- <defs
- id="defs6519">
- <linearGradient
- id="Background">
- <stop
- id="stop4178"
- offset="0"
- style="stop-color:#b8b8b8;stop-opacity:1" />
- <stop
- id="stop4180"
- offset="1"
- style="stop-color:#c9c9c9;stop-opacity:1" />
- </linearGradient>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Inner Shadow"
- id="filter1121">
- <feFlood
- flood-opacity="0.59999999999999998"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood1123" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="out"
- result="composite1"
- id="feComposite1125" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur1127" />
- <feOffset
- dx="0"
- dy="2"
- result="offset"
- id="feOffset1129" />
- <feComposite
- in="offset"
- in2="SourceGraphic"
- operator="atop"
- result="composite2"
- id="feComposite1131" />
- </filter>
- <filter
- style="color-interpolation-filters:sRGB;"
- inkscape:label="Drop Shadow"
- id="filter950">
- <feFlood
- flood-opacity="0.25"
- flood-color="rgb(0,0,0)"
- result="flood"
- id="feFlood952" />
- <feComposite
- in="flood"
- in2="SourceGraphic"
- operator="in"
- result="composite1"
- id="feComposite954" />
- <feGaussianBlur
- in="composite1"
- stdDeviation="1"
- result="blur"
- id="feGaussianBlur956" />
- <feOffset
- dx="0"
- dy="1"
- result="offset"
- id="feOffset958" />
- <feComposite
- in="SourceGraphic"
- in2="offset"
- operator="over"
- result="composite2"
- id="feComposite960" />
- <feBlend
- blend="normal"
- id="feBlend3895"
- in2="composite2" />
- </filter>
- <clipPath
- clipPathUnits="userSpaceOnUse"
- id="clipPath873">
- <g
- transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
- id="g875"
- inkscape:label="Layer 1"
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
- <path
- style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
- d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
- id="path877"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- </clipPath>
- <filter
- inkscape:collect="always"
- id="filter891"
- inkscape:label="Badge Shadow">
- <feGaussianBlur
- inkscape:collect="always"
- stdDeviation="0.71999962"
- id="feGaussianBlur893" />
- </filter>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="8.1490724"
- inkscape:cx="6.3116087"
- inkscape:cy="46.975739"
- inkscape:document-units="px"
- inkscape:current-layer="layer3"
- showgrid="true"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- inkscape:window-width="1849"
- inkscape:window-height="1218"
- inkscape:window-x="123"
- inkscape:window-y="68"
- inkscape:window-maximized="0"
- showborder="true"
- showguides="true"
- inkscape:guide-bbox="true"
- inkscape:showpageshadow="false">
- <inkscape:grid
- type="xygrid"
- id="grid821" />
- <sodipodi:guide
- orientation="1,0"
- position="16,48"
- id="guide823" />
- <sodipodi:guide
- orientation="0,1"
- position="64,80"
- id="guide825" />
- <sodipodi:guide
- orientation="1,0"
- position="80,40"
- id="guide827" />
- <sodipodi:guide
- orientation="0,1"
- position="64,16"
- id="guide829" />
- </sodipodi:namedview>
- <metadata
- id="metadata6522">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title />
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="BACKGROUND"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(268,-635.29076)"
- style="display:inline">
- <path
- style="fill:#333333;fill-opacity:0.93333334000000001;stroke:none;display:inline;filter:url(#filter1121);opacity:0.7"
- d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
- id="path6455"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="sssssssss" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="PLACEHOLDER LETTER"
- style="display:inline">
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;filter:url(#filter950);font-family:Sans"
- x="34.504002"
- y="62.644001"
- id="text3891"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan3893"
- x="34.504002"
- y="62.644001"
- style="font-size:56px;font-style:normal;font-variant:normal;font-weight:500;font-stretch:normal;fill:#ffffff;fill-opacity:1;font-family:Ubuntu;-inkscape-font-specification:Ubuntu Medium">z</tspan></text>
- <rect
- style="opacity:0.69999999999999996;fill:none;stroke:none"
- id="rect3021"
- width="64"
- height="64"
- x="16"
- y="16" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="BADGE"
- style="display:none"
- sodipodi:insensitive="true">
- <g
- style="display:inline"
- transform="translate(-340.00001,-581)"
- id="g4394"
- clip-path="none">
- <g
- id="g855">
- <g
- inkscape:groupmode="maskhelper"
- id="g870"
- clip-path="url(#clipPath873)"
- style="opacity:0.6;filter:url(#filter891)">
- <path
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
- d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 6.62742,0 12,5.37259 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path844"
- style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- </g>
- <g
- id="g862">
- <path
- sodipodi:type="arc"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4398"
- sodipodi:cx="252"
- sodipodi:cy="552.36218"
- sodipodi:rx="12"
- sodipodi:ry="12"
- d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 6.62742,0 12,5.37259 12,12 z"
- transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
- <path
- transform="matrix(1.25,0,0,1.25,33,-100.45273)"
- d="m 264,552.36218 c 0,6.62742 -5.37258,12 -12,12 -6.62742,0 -12,-5.37258 -12,-12 0,-6.62741 5.37258,-12 12,-12 6.62742,0 12,5.37259 12,12 z"
- sodipodi:ry="12"
- sodipodi:rx="12"
- sodipodi:cy="552.36218"
- sodipodi:cx="252"
- id="path4400"
- style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- sodipodi:type="arc" />
- <path
- sodipodi:type="star"
- style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
- id="path4459"
- sodipodi:sides="5"
- sodipodi:cx="666.19574"
- sodipodi:cy="589.50385"
- sodipodi:r1="7.2431178"
- sodipodi:r2="4.3458705"
- sodipodi:arg1="1.0471976"
- sodipodi:arg2="1.6755161"
- inkscape:flatsided="false"
- inkscape:rounded="0.1"
- inkscape:randomized="0"
- d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 -0.18379,0.41279 0.0427,4.27917 -0.34859,4.5051 z"
- transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
- </g>
- </g>
- </g>
- </g>
-</svg>
diff --git a/charms/trusty/zookeeper/metadata.yaml b/charms/trusty/zookeeper/metadata.yaml
deleted file mode 100644
index 871ce24..0000000
--- a/charms/trusty/zookeeper/metadata.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: zookeeper
-maintainer: James Page <james.page@ubuntu.com>
-summary: High-performance coordination service for distributed applications
-tags:
- - database
-description: |
- ZooKeeper is a centralized, reliable, service for maintaining
- configuration information, naming, providing distributed
- synchronization, and group services. All of these kinds of services
- are used in some form or another by distributed applications. This
- package contains the shell scripts and an example configuration but
- does not automatically start up the service. The example configuration
- is installed with the update-alternatives mechanism.
-provides:
- zookeeper:
- interface: zookeeper
-peers:
- quorum:
- interface: zookeeper-quorum
diff --git a/charms/trusty/zookeeper/revision b/charms/trusty/zookeeper/revision
deleted file mode 100644
index 48082f7..0000000
--- a/charms/trusty/zookeeper/revision
+++ /dev/null
@@ -1 +0,0 @@
-12
diff --git a/charms/trusty/zookeeper/tests/00-setup b/charms/trusty/zookeeper/tests/00-setup
deleted file mode 100644
index 48cd230..0000000
--- a/charms/trusty/zookeeper/tests/00-setup
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-# This script sets up the requirements for amulet tests.
-
-set -x
-
-# Check if amulet is installed before adding the stable repository and updating apt-get.
-dpkg -s amulet
-if [ $? -ne 0 ]; then
- sudo add-apt-repository -y ppa:juju/stable
- sudo apt-get update -qq
- sudo apt-get install -y amulet
-fi
-
-# Install any additional python packages or other required software.
-sudo apt-get install -y python3-requests \ No newline at end of file
diff --git a/charms/trusty/zookeeper/tests/10-bundles-test.py b/charms/trusty/zookeeper/tests/10-bundles-test.py
deleted file mode 100644
index 67710c7..0000000
--- a/charms/trusty/zookeeper/tests/10-bundles-test.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env python3
-
-# This amulet test deploys the bundles.yaml file in this directory.
-
-import os
-import unittest
-import yaml
-import amulet
-
-seconds_to_wait = 720
-
-
-class BundleTest(unittest.TestCase):
- """ Create a class for testing the charm in the unit test framework. """
- @classmethod
- def setUpClass(cls):
- """ Set up an amulet deployment using the bundle. """
- d = amulet.Deployment()
- bundle_path = os.path.join(os.path.dirname(__file__), 'bundles.yaml')
- with open(bundle_path, 'r') as bundle_file:
- contents = yaml.safe_load(bundle_file)
- d.load(contents)
- d.setup(seconds_to_wait)
- d.sentry.wait(seconds_to_wait)
- cls.d = d
-
- def test_deployed(self):
- """ Test to see if the bundle deployed successfully. """
- self.assertTrue(self.d.deployed)
-
-
-if __name__ == '__main__':
- unittest.main() \ No newline at end of file
diff --git a/charms/trusty/zookeeper/tests/bundles.yaml b/charms/trusty/zookeeper/tests/bundles.yaml
deleted file mode 100644
index 0b7699c..0000000
--- a/charms/trusty/zookeeper/tests/bundles.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-zookeeper-automated-bundle:
- relations: []
- series: trusty
- services:
- zookeeper:
- charm: zookeeper
- num_units: 1