Diffstat (limited to 'charms/trusty/cassandra/hooks')
50 files changed, 0 insertions, 8777 deletions
diff --git a/charms/trusty/cassandra/hooks/actions.py b/charms/trusty/cassandra/hooks/actions.py deleted file mode 100644 index 8887056..0000000 --- a/charms/trusty/cassandra/hooks/actions.py +++ /dev/null @@ -1,990 +0,0 @@ -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -from contextlib import closing -import errno -from functools import wraps -import glob -import os.path -import re -import shlex -import socket -import subprocess -from textwrap import dedent -import time -import urllib.request - -from charmhelpers import fetch -from charmhelpers.contrib.charmsupport import nrpe -from charmhelpers.contrib.network import ufw -from charmhelpers.contrib.templating import jinja -from charmhelpers.core import hookenv, host -from charmhelpers.core.fstab import Fstab -from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING - -import cassandra - -from coordinator import coordinator -import helpers -import relations - - -# These config keys cannot be changed after service deployment. -UNCHANGEABLE_KEYS = set(['cluster_name', 'datacenter', 'rack', 'edition']) - -# If any of these config items are changed, Cassandra needs to be -# restarted and maybe remounted. -RESTART_REQUIRED_KEYS = set([ - 'data_file_directories', - 'commitlog_directory', - 'saved_caches_directory', - 'storage_port', - 'ssl_storage_port', - 'rpc_port', - 'native_transport_port', - 'partitioner', - 'num_tokens', - 'max_heap_size', - 'heap_newsize', - 'authenticator', - 'authorizer', - 'compaction_throughput_mb_per_sec', - 'stream_throughput_outbound_megabits_per_sec', - 'tombstone_warn_threshold', - 'tombstone_failure_threshold', - 'jre', - 'private_jre_url']) - -ALL_CONFIG_KEYS = UNCHANGEABLE_KEYS.union(RESTART_REQUIRED_KEYS) - - -# All other config items. By maintaining both lists, we can detect if -# someone forgot to update these lists when they added a new config item. -RESTART_NOT_REQUIRED_KEYS = set([ - 'extra_packages', - 'package_status', - 'install_sources', - 'install_keys', - 'http_proxy', - 'wait_for_storage_broker', - 'io_scheduler', - 'nagios_context', - 'nagios_servicegroups', - 'nagios_heapchk_warn_pct', - 'nagios_heapchk_crit_pct', - 'nagios_disk_warn_pct', - 'nagios_disk_crit_pct']) - - -def action(func): - '''Log and call func, stripping the undesirable servicename argument. 
- ''' - @wraps(func) - def wrapper(servicename, *args, **kw): - if hookenv.remote_unit(): - hookenv.log("** Action {}/{} ({})".format(hookenv.hook_name(), - func.__name__, - hookenv.remote_unit())) - else: - hookenv.log("** Action {}/{}".format(hookenv.hook_name(), - func.__name__)) - return func(*args, **kw) - return wrapper - - -def leader_only(func): - '''Decorated function is only run on the leader.''' - @wraps(func) - def wrapper(*args, **kw): - if hookenv.is_leader(): - return func(*args, **kw) - else: - return None - return wrapper - - -def authentication(func): - '''Decorated function is skipped if authentication is disabled.''' - @wraps(func) - def wrapper(*args, **kw): - auth = hookenv.config()['authenticator'] - if auth == 'PasswordAuthenticator': - return func(*args, **kw) - elif auth == 'AllowAllAuthenticator': - hookenv.log('Skipped. Authentication disabled.', DEBUG) - return None - helpers.status_set('blocked', 'Unknown authenticator {}'.format(auth)) - raise SystemExit(0) - return wrapper - - -@action -def set_proxy(): - import hooks - hooks.set_proxy() - - -@action -def revert_unchangeable_config(): - config = hookenv.config() - - # config.previous() only becomes meaningful after the install - # hook has run. During the first run on the unit hook, it - # reports everything has having None as the previous value. - if config._prev_dict is None: - return - - for key in UNCHANGEABLE_KEYS: - if config.changed(key): - previous = config.previous(key) - hookenv.log('{} cannot be changed after service deployment. ' - 'Using original setting {!r}'.format(key, previous), - ERROR) - config[key] = previous - - -# FOR CHARMHELPERS -@action -def preinstall(): - '''Preinstallation data_ready hook.''' - # Only run the preinstall hooks from the actual install hook. - if hookenv.hook_name() == 'install': - # Pre-exec - pattern = os.path.join(hookenv.charm_dir(), - 'exec.d', '*', 'charm-pre-install') - for f in sorted(glob.glob(pattern)): - if os.path.isfile(f) and os.access(f, os.X_OK): - hookenv.log('Running preinstall hook {}'.format(f)) - subprocess.check_call(['sh', '-c', f]) - else: - hookenv.log('Ingnoring preinstall hook {}'.format(f), - WARNING) - else: - hookenv.log('No preinstall hooks found') - - -# FOR CHARMHELPERS -@action -def swapoff(fstab='/etc/fstab'): - '''Turn off swapping in the container, permanently.''' - # Turn off swap in the current session - if helpers.is_lxc(): - hookenv.log("In an LXC. Not touching swap.") - return - else: - try: - subprocess.check_call(['swapoff', '-a']) - except Exception as e: - hookenv.log("Got an error trying to turn off swapping. {}. " - "We may be in an LXC. Exiting gracefully" - "".format(e), WARNING) - return - - # Disable swap permanently - with closing(Fstab(fstab)) as fstab: - while True: - swap_entry = fstab.get_entry_by_attr('filesystem', 'swap') - if swap_entry is None: - break - fstab.remove_entry(swap_entry) - - -# FOR CHARMHELPERS -@action -def configure_sources(): - '''Standard charmhelpers package source configuration.''' - config = hookenv.config() - if config.changed('install_sources') or config.changed('install_keys'): - fetch.configure_sources(True) - - -@action -def add_implicit_package_signing_keys(): - # Rather than blindly add these keys, we should sniff - # config['install_sources'] for apache.org or datastax.com urls and - # add only the appropriate keys. 
- for key in ('apache', 'datastax'): - path = os.path.join(hookenv.charm_dir(), 'lib', '{}.key'.format(key)) - subprocess.check_call(['apt-key', 'add', path], - stdin=subprocess.DEVNULL) - - -@action -def reset_sysctl(): - '''Configure sysctl settings for Cassandra''' - if helpers.is_lxc(): - hookenv.log("In an LXC. Leaving sysctl unchanged.") - else: - cassandra_sysctl_file = os.path.join('/', 'etc', 'sysctl.d', - '99-cassandra.conf') - contents = b"vm.max_map_count = 131072\n" - try: - host.write_file(cassandra_sysctl_file, contents) - subprocess.check_call(['sysctl', '-p', cassandra_sysctl_file]) - except OSError as e: - if e.errno == errno.EACCES: - hookenv.log("Got Permission Denied trying to set the " - "sysctl settings at {}. We may be in an LXC. " - "Exiting gracefully".format(cassandra_sysctl_file), - WARNING) - else: - raise - - -@action -def reset_limits(): - '''Set /etc/security/limits.d correctly for Ubuntu, so the - startup scripts don't emit a spurious warning. - - Per Cassandra documentation, Ubuntu needs some extra - twiddling in /etc/security/limits.d. I have no idea why - the packages don't do this, since they are already - setting limits for the cassandra user correctly. The real - bug is that the limits of the user running the startup script - are being checked, rather than the limits of the user that will - actually run the process. - ''' - contents = dedent('''\ - # Maintained by Juju - root - memlock unlimited - root - nofile 100000 - root - nproc 32768 - root - as unlimited - ubuntu - memlock unlimited - ubuntu - nofile 100000 - ubuntu - nproc 32768 - ubuntu - as unlimited - ''') - host.write_file('/etc/security/limits.d/cassandra-charm.conf', - contents.encode('US-ASCII')) - - -@action -def install_cassandra_packages(): - helpers.install_packages(helpers.get_cassandra_packages()) - if helpers.get_jre() != 'oracle': - subprocess.check_call(['update-java-alternatives', - '--jre-headless', - '--set', 'java-1.8.0-openjdk-amd64']) - - -@action -def ensure_cassandra_package_status(): - helpers.ensure_package_status(helpers.get_cassandra_packages()) - - -def _fetch_oracle_jre(): - config = hookenv.config() - url = config.get('private_jre_url', None) - if url and config.get('retrieved_jre', None) != url: - filename = os.path.join(hookenv.charm_dir(), - 'lib', url.split('/')[-1]) - if not filename.endswith('-linux-x64.tar.gz'): - helpers.status_set('blocked', - 'Invalid private_jre_url {}'.format(url)) - raise SystemExit(0) - helpers.status_set(hookenv.status_get()[0], - 'Downloading Oracle JRE') - hookenv.log('Oracle JRE URL is {}'.format(url)) - urllib.request.urlretrieve(url, filename) - config['retrieved_jre'] = url - - pattern = os.path.join(hookenv.charm_dir(), - 'lib', 'server-jre-?u*-linux-x64.tar.gz') - tarballs = glob.glob(pattern) - if not (url or tarballs): - helpers.status_set('blocked', - 'private_jre_url not set and no local tarballs.') - raise SystemExit(0) - - elif not tarballs: - helpers.status_set('blocked', - 'Oracle JRE tarball not found ({})'.format(pattern)) - raise SystemExit(0) - - # Latest tarball by filename/version num. Lets hope they don't hit - # 99 (currently at 76). - tarball = sorted(tarballs)[-1] - return tarball - - -def _install_oracle_jre_tarball(tarball): - # Same directory as webupd8 to avoid surprising people, but it could - # be anything. 
- if 'jre-7u' in str(tarball): - dest = '/usr/lib/jvm/java-7-oracle' - else: - dest = '/usr/lib/jvm/java-8-oracle' - - if not os.path.isdir(dest): - host.mkdir(dest) - - jre_exists = os.path.exists(os.path.join(dest, 'bin', 'java')) - - config = hookenv.config() - - # Unpack the latest tarball if necessary. - if config.get('oracle_jre_tarball', '') == tarball and jre_exists: - hookenv.log('Already installed {}'.format(tarball)) - else: - hookenv.log('Unpacking {}'.format(tarball)) - subprocess.check_call(['tar', '-xz', '-C', dest, - '--strip-components=1', '-f', tarball]) - config['oracle_jre_tarball'] = tarball - - # Set alternatives, so /usr/bin/java does what we want. - for tool in ['java', 'javac']: - tool_path = os.path.join(dest, 'bin', tool) - subprocess.check_call(['update-alternatives', '--install', - os.path.join('/usr/bin', tool), - tool, tool_path, '1']) - subprocess.check_call(['update-alternatives', - '--set', tool, tool_path]) - - -@action -def install_oracle_jre(): - if helpers.get_jre() != 'oracle': - return - - tarball = _fetch_oracle_jre() - _install_oracle_jre_tarball(tarball) - - -@action -def emit_java_version(): - # Log the version for posterity. Could be useful since Oracle JRE - # security updates are not automated. - version = subprocess.check_output(['java', '-version'], - universal_newlines=True) - for line in version.splitlines(): - hookenv.log('JRE: {}'.format(line)) - - -@action -def emit_meminfo(): - helpers.emit(subprocess.check_output(['free', '--human'], - universal_newlines=True)) - - -@action -def configure_cassandra_yaml(): - helpers.configure_cassandra_yaml() - - -@action -def configure_cassandra_env(): - cassandra_env_path = helpers.get_cassandra_env_file() - assert os.path.exists(cassandra_env_path) - - helpers.maybe_backup(cassandra_env_path) - - overrides = [ - ('max_heap_size', re.compile(r'^#?(MAX_HEAP_SIZE)=(.*)$', re.M)), - ('heap_newsize', re.compile(r'^#?(HEAP_NEWSIZE)=(.*)$', re.M)), - # We don't allow this to be overridden to ensure that tools - # will find JMX using the default port. - # ('jmx_port', re.compile(r'^#?(JMX_PORT)=(.*)$', re.M)), - ] - - with open(cassandra_env_path, 'r') as f: - env = f.read() - - config = hookenv.config() - for key, regexp in overrides: - if config[key]: - val = shlex.quote(str(config[key])) - env = regexp.sub(r'\g<1>={}'.format(val), - env) - else: - env = regexp.sub(r'#\1=\2', env) - host.write_file(cassandra_env_path, env.encode('UTF-8')) - - -@action -def configure_cassandra_rackdc(): - config = hookenv.config() - datacenter = config['datacenter'].strip() - rack = config['rack'].strip() or hookenv.service_name() - rackdc_properties = dedent('''\ - dc={} - rack={} - ''').format(datacenter, rack) - rackdc_path = helpers.get_cassandra_rackdc_file() - host.write_file(rackdc_path, rackdc_properties.encode('UTF-8')) - - -def needs_reset_auth_keyspace_replication(): - '''Guard for reset_auth_keyspace_replication.''' - num_nodes = helpers.num_nodes() - datacenter = hookenv.config()['datacenter'] - with helpers.connect() as session: - strategy_opts = helpers.get_auth_keyspace_replication(session) - rf = int(strategy_opts.get(datacenter, -1)) - hookenv.log('system_auth rf={!r}'.format(strategy_opts)) - # If the node count has changed, we should change the rf. 
- return rf != num_nodes - - -@leader_only -@action -@authentication -@coordinator.require('repair', needs_reset_auth_keyspace_replication) -def reset_auth_keyspace_replication(): - # Cassandra requires you to manually set the replication factor of - # the system_auth keyspace, to ensure availability and redundancy. - # The recommendation is to set the replication factor so that every - # node has a copy. - num_nodes = helpers.num_nodes() - datacenter = hookenv.config()['datacenter'] - with helpers.connect() as session: - strategy_opts = helpers.get_auth_keyspace_replication(session) - rf = int(strategy_opts.get(datacenter, -1)) - hookenv.log('system_auth rf={!r}'.format(strategy_opts)) - if rf != num_nodes: - strategy_opts['class'] = 'NetworkTopologyStrategy' - strategy_opts[datacenter] = num_nodes - if 'replication_factor' in strategy_opts: - del strategy_opts['replication_factor'] - helpers.set_auth_keyspace_replication(session, strategy_opts) - if rf < num_nodes: - # Increasing rf, need to run repair. - helpers.repair_auth_keyspace() - helpers.set_active() - - -@action -def store_unit_private_ip(): - '''Store the unit's private ip address, so we can tell if it changes.''' - hookenv.config()['unit_private_ip'] = hookenv.unit_private_ip() - - -def needs_restart(): - '''Return True if Cassandra is not running or needs to be restarted.''' - if helpers.is_decommissioned(): - # Decommissioned nodes are never restarted. They remain up - # telling everyone they are decommissioned. - helpers.status_set('blocked', 'Decommissioned node') - return False - - if not helpers.is_cassandra_running(): - if helpers.is_bootstrapped(): - helpers.status_set('waiting', 'Waiting for permission to start') - else: - helpers.status_set('waiting', - 'Waiting for permission to bootstrap') - return True - - config = hookenv.config() - - # If our IP address has changed, we need to restart. - if config.changed('unit_private_ip'): - helpers.status_set('waiting', 'IP address changed. ' - 'Waiting for restart permission.') - return True - - # If the directory paths have changed, we need to migrate data - # during a restart. - storage = relations.StorageRelation() - if storage.needs_remount(): - helpers.status_set(hookenv.status_get()[0], - 'New mounts. Waiting for restart permission') - return True - - # If any of these config items changed, a restart is required. - for key in RESTART_REQUIRED_KEYS: - if config.changed(key): - hookenv.log('{} changed. Restart required.'.format(key)) - for key in RESTART_REQUIRED_KEYS: - if config.changed(key): - helpers.status_set(hookenv.status_get()[0], - 'Config changes. ' - 'Waiting for restart permission.') - return True - - # If we have new seeds, we should restart. - new_seeds = helpers.get_seed_ips() - if config.get('configured_seeds') != sorted(new_seeds): - old_seeds = set(config.previous('configured_seeds') or []) - changed = old_seeds.symmetric_difference(new_seeds) - # We don't care about the local node in the changes. - changed.discard(hookenv.unit_private_ip()) - if changed: - helpers.status_set(hookenv.status_get()[0], - 'Updated seeds {!r}. ' - 'Waiting for restart permission.' - ''.format(new_seeds)) - return True - - hookenv.log('Restart not required') - return False - - -@action -@coordinator.require('restart', needs_restart) -def maybe_restart(): - '''Restart sequence. - - If a restart is needed, shutdown Cassandra, perform all pending operations - that cannot be be done while Cassandra is live, and restart it. 
- ''' - helpers.status_set('maintenance', 'Stopping Cassandra') - helpers.stop_cassandra() - helpers.remount_cassandra() - helpers.ensure_database_directories() - if helpers.peer_relid() and not helpers.is_bootstrapped(): - helpers.status_set('maintenance', 'Bootstrapping') - else: - helpers.status_set('maintenance', 'Starting Cassandra') - helpers.start_cassandra() - - -@action -def post_bootstrap(): - '''Maintain state on if the node has bootstrapped into the cluster. - - Per documented procedure for adding new units to a cluster, wait 2 - minutes if the unit has just bootstrapped to ensure other units - do not attempt bootstrap too soon. Also, wait until completed joining - to ensure we keep the lock and ensure other nodes don't restart or - bootstrap. - ''' - if not helpers.is_bootstrapped(): - if coordinator.relid is not None: - helpers.status_set('maintenance', 'Post-bootstrap 2 minute delay') - hookenv.log('Post-bootstrap 2 minute delay') - time.sleep(120) # Must wait 2 minutes between bootstrapping nodes. - - join_msg_set = False - while True: - status = helpers.get_node_status() - if status == 'NORMAL': - break - elif status == 'JOINING': - if not join_msg_set: - helpers.status_set('maintenance', 'Still joining cluster') - join_msg_set = True - time.sleep(10) - continue - else: - if status is None: - helpers.status_set('blocked', - 'Unexpectedly shutdown during ' - 'bootstrap') - else: - helpers.status_set('blocked', - 'Failed to bootstrap ({})' - ''.format(status)) - raise SystemExit(0) - - # Unconditionally call this to publish the bootstrapped flag to - # the peer relation, as the first unit was bootstrapped before - # the peer relation existed. - helpers.set_bootstrapped() - - -@action -def stop_cassandra(): - helpers.stop_cassandra() - - -@action -def start_cassandra(): - helpers.start_cassandra() - - -@leader_only -@action -@authentication -def create_unit_superusers(): - # The leader creates and updates accounts for nodes, using the - # encrypted password they provide in relations.PeerRelation. We - # don't end up with unencrypted passwords leaving the unit, and we - # don't need to restart Cassandra in no-auth mode which is slow and - # I worry may cause issues interrupting the bootstrap. - if not coordinator.relid: - return # No peer relation, no requests yet. - - created_units = helpers.get_unit_superusers() - uncreated_units = [u for u in hookenv.related_units(coordinator.relid) - if u not in created_units] - for peer in uncreated_units: - rel = hookenv.relation_get(unit=peer, rid=coordinator.relid) - username = rel.get('username') - pwhash = rel.get('pwhash') - if not username: - continue - hookenv.log('Creating {} account for {}'.format(username, peer)) - with helpers.connect() as session: - helpers.ensure_user(session, username, pwhash, superuser=True) - created_units.add(peer) - helpers.set_unit_superusers(created_units) - - -@action -def reset_all_io_schedulers(): - dirs = helpers.get_all_database_directories() - dirs = (dirs['data_file_directories'] + [dirs['commitlog_directory']] + - [dirs['saved_caches_directory']]) - config = hookenv.config() - for d in dirs: - if os.path.isdir(d): # Directory may not exist yet. 
- helpers.set_io_scheduler(config['io_scheduler'], d) - - -def _client_credentials(relid): - '''Return the client credentials used by relation relid.''' - relinfo = hookenv.relation_get(unit=hookenv.local_unit(), rid=relid) - username = relinfo.get('username') - password = relinfo.get('password') - if username is None or password is None: - for unit in hookenv.related_units(coordinator.relid): - try: - relinfo = hookenv.relation_get(unit=unit, rid=relid) - username = relinfo.get('username') - password = relinfo.get('password') - if username is not None and password is not None: - return username, password - except subprocess.CalledProcessError: - pass # Assume the remote unit has not joined yet. - return None, None - else: - return username, password - - -def _publish_database_relation(relid, superuser): - # The Casandra service needs to provide a common set of credentials - # to a client unit. The leader creates these, if none of the other - # units are found to have published them already (a previously elected - # leader may have done this). The leader then tickles the other units, - # firing a hook and giving them the opportunity to copy and publish - # these credentials. - username, password = _client_credentials(relid) - if username is None: - if hookenv.is_leader(): - # Credentials not set. The leader must generate them. We use - # the service name so that database permissions remain valid - # even after the relation is dropped and recreated, or the - # juju environment rebuild and the database restored from - # backups. - service_name = helpers.get_service_name(relid) - if not service_name: - # Per Bug #1555261, we might not yet have related units, - # so no way to calculate the remote service name and thus - # the user. - return # Try again later. - username = 'juju_{}'.format(helpers.get_service_name(relid)) - if superuser: - username += '_admin' - password = host.pwgen() - pwhash = helpers.encrypt_password(password) - with helpers.connect() as session: - helpers.ensure_user(session, username, pwhash, superuser) - # Wake the peers, if any. - helpers.leader_ping() - else: - return # No credentials yet. Nothing to do. - - # Publish the information the client needs on the relation where - # they can find it. - # - authentication credentials - # - address and port - # - cluster_name, so clients can differentiate multiple clusters - # - datacenter + rack, so clients know what names they can use - # when altering keyspace replication settings. - config = hookenv.config() - hookenv.relation_set(relid, - username=username, password=password, - host=hookenv.unit_public_ip(), - native_transport_port=config['native_transport_port'], - rpc_port=config['rpc_port'], - cluster_name=config['cluster_name'], - datacenter=config['datacenter'], - rack=config['rack']) - - -@action -def publish_database_relations(): - for relid in hookenv.relation_ids('database'): - _publish_database_relation(relid, superuser=False) - - -@action -def publish_database_admin_relations(): - for relid in hookenv.relation_ids('database-admin'): - _publish_database_relation(relid, superuser=True) - - -@action -def install_maintenance_crontab(): - # Every unit should run repair once per week (at least once per - # GCGraceSeconds, which defaults to 10 days but can be changed per - # keyspace). # Distribute the repair time evenly over the week. 
- unit_num = int(hookenv.local_unit().split('/')[-1]) - dow, hour, minute = helpers.week_spread(unit_num) - contents = jinja.render('cassandra_maintenance_cron.tmpl', vars()) - cron_path = "/etc/cron.d/cassandra-maintenance" - host.write_file(cron_path, contents.encode('US-ASCII')) - - -@action -def emit_cluster_info(): - helpers.emit_describe_cluster() - helpers.emit_status() - helpers.emit_netstats() - - -@action -def configure_firewall(): - '''Configure firewall rules using ufw. - - This is primarily to block access to the replication and JMX ports, - as juju's default port access controls are not strict enough and - allow access to the entire environment. - ''' - config = hookenv.config() - ufw.enable(soft_fail=True) - - # Enable SSH from anywhere, relying on Juju and external firewalls - # to control access. - ufw.service('ssh', 'open') - ufw.service('nrpe', 'open') # Also NRPE for nagios checks. - ufw.service('rsync', 'open') # Also rsync for data transfer and backups. - - # Clients need client access. These protocols are configured to - # require authentication. - client_keys = ['native_transport_port', 'rpc_port'] - client_ports = [config[key] for key in client_keys] - - # Peers need replication access. This protocols does not - # require authentication, so firewall it from other nodes. - peer_ports = [config['storage_port'], config['ssl_storage_port']] - - # Enable client access from anywhere. Juju and external firewalls - # can still restrict this further of course (ie. 'juju expose'). - for key in client_keys: - if config.changed(key) and config.previous(key) is not None: - # First close old ports. We use this order in the unlikely case - # someone is trying to swap the native and Thrift ports. - ufw.service(config.previous(key), 'close') - for port in client_ports: - # Then open or close the configured ports. - ufw.service(port, 'open') - - desired_rules = set() # ufw.grant_access/remove_access commands. - - # Rules for peers - for relinfo in hookenv.relations_of_type('cluster'): - if relinfo['private-address']: - pa = hookenv._ensure_ip(relinfo['private-address']) - for port in peer_ports: - desired_rules.add((pa, 'any', port)) - # Rules for admin connections. We allow database-admin relations access - # to the cluster communication ports so that tools like sstableloader - # can run. - for relinfo in hookenv.relations_of_type('database-admin'): - if relinfo['private-address']: - pa = hookenv._ensure_ip(relinfo['private-address']) - for port in peer_ports: - desired_rules.add((pa, 'any', port)) - - previous_rules = set(tuple(rule) for rule in config.get('ufw_rules', [])) - - # Close any rules previously opened that are no longer desired. - for rule in sorted(list(previous_rules - desired_rules)): - ufw.revoke_access(*rule) - - # Open all the desired rules. - for rule in sorted(list(desired_rules)): - ufw.grant_access(*rule) - - # Store our rules for next time. Note that this is inherantly racy - - # this value is only persisted if the hook exits cleanly. If the - # hook fails, then someone changes port configuration or IP - # addresses change, then the failed hook retried, we can lose track - # of previously granted rules and they will never be revoked. It is - # impossible to remove this race entirely, so we stick with this - # simple approach. - config['ufw_rules'] = list(desired_rules) # A list because JSON. 
- - -@action -def nrpe_external_master_relation(): - ''' Configure the nrpe-external-master relation ''' - local_plugins = helpers.local_plugins_dir() - if os.path.exists(local_plugins): - src = os.path.join(hookenv.charm_dir(), - "files", "check_cassandra_heap.sh") - with open(src, 'rb') as f: - host.write_file(os.path.join(local_plugins, - 'check_cassandra_heap.sh'), - f.read(), perms=0o555) - - nrpe_compat = nrpe.NRPE() - conf = hookenv.config() - - cassandra_heap_warn = conf.get('nagios_heapchk_warn_pct') - cassandra_heap_crit = conf.get('nagios_heapchk_crit_pct') - if cassandra_heap_warn and cassandra_heap_crit: - nrpe_compat.add_check( - shortname="cassandra_heap", - description="Check Cassandra Heap", - check_cmd="check_cassandra_heap.sh localhost {} {}" - "".format(cassandra_heap_warn, cassandra_heap_crit)) - - cassandra_disk_warn = conf.get('nagios_disk_warn_pct') - cassandra_disk_crit = conf.get('nagios_disk_crit_pct') - dirs = helpers.get_all_database_directories() - dirs = set(dirs['data_file_directories'] + - [dirs['commitlog_directory'], dirs['saved_caches_directory']]) - # We need to check the space on the mountpoint, not on the actual - # directory, as the nagios user won't have access to the actual directory. - mounts = set(helpers.mountpoint(d) for d in dirs) - for disk in mounts: - check_name = re.sub('[^A-Za-z0-9_]', '_', disk) - if cassandra_disk_warn and cassandra_disk_crit: - shortname = "cassandra_disk{}".format(check_name) - hookenv.log("Adding disk utilization check {}".format(shortname), - DEBUG) - nrpe_compat.add_check( - shortname=shortname, - description="Check Cassandra Disk {}".format(disk), - check_cmd="check_disk -u GB -w {}% -c {}% -K 5% -p {}" - "".format(cassandra_disk_warn, cassandra_disk_crit, - disk)) - nrpe_compat.write() - - -@leader_only -@action -def maintain_seeds(): - '''The leader needs to maintain the list of seed nodes''' - seed_ips = helpers.get_seed_ips() - hookenv.log('Current seeds == {!r}'.format(seed_ips), DEBUG) - - bootstrapped_ips = helpers.get_bootstrapped_ips() - hookenv.log('Bootstrapped == {!r}'.format(bootstrapped_ips), DEBUG) - - # Remove any seeds that are no longer bootstrapped, such as dropped - # units. - seed_ips.intersection_update(bootstrapped_ips) - - # Add more bootstrapped nodes, if necessary, to get to our maximum - # of 3 seeds. - potential_seed_ips = list(reversed(sorted(bootstrapped_ips))) - while len(seed_ips) < 3 and potential_seed_ips: - seed_ips.add(potential_seed_ips.pop()) - - # If there are no seeds or bootstrapped nodes, start with the leader. Us. - if len(seed_ips) == 0: - seed_ips.add(hookenv.unit_private_ip()) - - hookenv.log('Updated seeds == {!r}'.format(seed_ips), DEBUG) - - hookenv.leader_set(seeds=','.join(sorted(seed_ips))) - - -@leader_only -@action -@authentication -def reset_default_password(): - if hookenv.leader_get('default_admin_password_changed'): - hookenv.log('Default admin password already changed') - return - - # Cassandra ships with well known credentials, rather than - # providing a tool to reset credentials. This is a huge security - # hole we must close. - try: - # We need a big timeout here, as the cassandra user actually - # springs into existence some time after Cassandra has started - # up and is accepting connections. 
- with helpers.connect('cassandra', 'cassandra', - timeout=120, auth_timeout=120) as session: - # But before we close this security hole, we need to use these - # credentials to create a different admin account for the - # leader, allowing it to create accounts for other nodes as they - # join. The alternative is restarting Cassandra without - # authentication, which this charm will likely need to do in the - # future when we allow Cassandra services to be related together. - helpers.status_set('maintenance', - 'Creating initial superuser account') - username, password = helpers.superuser_credentials() - pwhash = helpers.encrypt_password(password) - helpers.ensure_user(session, username, pwhash, superuser=True) - helpers.set_unit_superusers([hookenv.local_unit()]) - - helpers.status_set('maintenance', - 'Changing default admin password') - helpers.query(session, 'ALTER USER cassandra WITH PASSWORD %s', - cassandra.ConsistencyLevel.ALL, (host.pwgen(),)) - except cassandra.AuthenticationFailed: - hookenv.log('Default superuser account already reset') - try: - with helpers.connect(): - hookenv.log("Leader's superuser account already created") - except cassandra.AuthenticationFailed: - # We have no known superuser credentials. Create the account - # the hard, slow way. This will be the normal method - # of creating the service's initial account when we allow - # services to be related together. - helpers.create_unit_superuser_hard() - - hookenv.leader_set(default_admin_password_changed=True) - - -@action -def set_active(): - # If we got this far, the unit is active. Update the status if it is - # not already active. We don't do this unconditionally, as the charm - # may be active but doing stuff, like active but waiting for restart - # permission. - if hookenv.status_get()[0] != 'active': - helpers.set_active() - else: - hookenv.log('Unit status already active', DEBUG) - - -@action -@authentication -def request_unit_superuser(): - relid = helpers.peer_relid() - if relid is None: - hookenv.log('Request deferred until peer relation exists') - return - - relinfo = hookenv.relation_get(unit=hookenv.local_unit(), - rid=relid) - if relinfo and relinfo.get('username'): - # We must avoid blindly setting the pwhash on the relation, - # as we will likely get a different value everytime we - # encrypt the password due to the random salt. - hookenv.log('Superuser account request previously made') - else: - # Publish the requested superuser and hash to our peers. - username, password = helpers.superuser_credentials() - pwhash = helpers.encrypt_password(password) - hookenv.relation_set(relid, username=username, pwhash=pwhash) - hookenv.log('Requested superuser account creation') - - -@action -def update_etc_hosts(): - hostname = socket.gethostname() - addr = hookenv.unit_private_ip() - hosts_map = {addr: hostname} - # only need to add myself to /etc/hosts - helpers.update_hosts_file('/etc/hosts', hosts_map) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/__init__.py deleted file mode 100644 index f72e7f8..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
-# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. -import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py deleted file mode 100644 index 1d039ea..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/benchmark/__init__.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
- -import subprocess -import time -import os -from distutils.spawn import find_executable - -from charmhelpers.core.hookenv import ( - in_relation_hook, - relation_ids, - relation_set, - relation_get, -) - - -def action_set(key, val): - if find_executable('action-set'): - action_cmd = ['action-set'] - - if isinstance(val, dict): - for k, v in iter(val.items()): - action_set('%s.%s' % (key, k), v) - return True - - action_cmd.append('%s=%s' % (key, val)) - subprocess.check_call(action_cmd) - return True - return False - - -class Benchmark(): - """ - Helper class for the `benchmark` interface. - - :param list actions: Define the actions that are also benchmarks - - From inside the benchmark-relation-changed hook, you would - Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) - - Examples: - - siege = Benchmark(['siege']) - siege.start() - [... run siege ...] - # The higher the score, the better the benchmark - siege.set_composite_score(16.70, 'trans/sec', 'desc') - siege.finish() - - - """ - - BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing - - required_keys = [ - 'hostname', - 'port', - 'graphite_port', - 'graphite_endpoint', - 'api_port' - ] - - def __init__(self, benchmarks=None): - if in_relation_hook(): - if benchmarks is not None: - for rid in sorted(relation_ids('benchmark')): - relation_set(relation_id=rid, relation_settings={ - 'benchmarks': ",".join(benchmarks) - }) - - # Check the relation data - config = {} - for key in self.required_keys: - val = relation_get(key) - if val is not None: - config[key] = val - else: - # We don't have all of the required keys - config = {} - break - - if len(config): - with open(self.BENCHMARK_CONF, 'w') as f: - for key, val in iter(config.items()): - f.write("%s=%s\n" % (key, val)) - - @staticmethod - def start(): - action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) - - """ - If the collectd charm is also installed, tell it to send a snapshot - of the current profile data. - """ - COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' - if os.path.exists(COLLECT_PROFILE_DATA): - subprocess.check_output([COLLECT_PROFILE_DATA]) - - @staticmethod - def finish(): - action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) - - @staticmethod - def set_composite_score(value, units, direction='asc'): - """ - Set the composite score for a benchmark run. This is a single number - representative of the benchmark results. This could be the most - important metric, or an amalgamation of metric scores. - """ - return action_set( - "meta.composite", - {'value': value, 'units': units, 'direction': direction} - ) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py deleted file mode 100644 index 2f24642..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/nrpe.py +++ /dev/null @@ -1,398 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -"""Compatibility with the nrpe-external-master charm""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Matthew Wedgwood <matthew.wedgwood@canonical.com> - -import subprocess -import pwd -import grp -import os -import glob -import shutil -import re -import shlex -import yaml - -from charmhelpers.core.hookenv import ( - config, - local_unit, - log, - relation_ids, - relation_set, - relations_of_type, -) - -from charmhelpers.core.host import service - -# This module adds compatibility with the nrpe-external-master and plain nrpe -# subordinate charms. To use it in your charm: -# -# 1. Update metadata.yaml -# -# provides: -# (...) -# nrpe-external-master: -# interface: nrpe-external-master -# scope: container -# -# and/or -# -# provides: -# (...) -# local-monitors: -# interface: local-monitors -# scope: container - -# -# 2. Add the following to config.yaml -# -# nagios_context: -# default: "juju" -# type: string -# description: | -# Used by the nrpe subordinate charms. -# A string that will be prepended to instance name to set the host name -# in nagios. So for instance the hostname would be something like: -# juju-myservice-0 -# If you're running multiple environments with the same services in them -# this allows you to differentiate between them. -# nagios_servicegroups: -# default: "" -# type: string -# description: | -# A comma-separated list of nagios servicegroups. -# If left empty, the nagios_context will be used as the servicegroup -# -# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master -# -# 4. Update your hooks.py with something like this: -# -# from charmsupport.nrpe import NRPE -# (...) -# def update_nrpe_config(): -# nrpe_compat = NRPE() -# nrpe_compat.add_check( -# shortname = "myservice", -# description = "Check MyService", -# check_cmd = "check_http -w 2 -c 10 http://localhost" -# ) -# nrpe_compat.add_check( -# "myservice_other", -# "Check for widget failures", -# check_cmd = "/srv/myapp/scripts/widget_check" -# ) -# nrpe_compat.write() -# -# def config_changed(): -# (...) -# update_nrpe_config() -# -# def nrpe_external_master_relation_changed(): -# update_nrpe_config() -# -# def local_monitors_relation_changed(): -# update_nrpe_config() -# -# 5. 
ln -s hooks.py nrpe-external-master-relation-changed -# ln -s hooks.py local-monitors-relation-changed - - -class CheckException(Exception): - pass - - -class Check(object): - shortname_re = '[A-Za-z0-9-_]+$' - service_template = (""" -#--------------------------------------------------- -# This file is Juju managed -#--------------------------------------------------- -define service {{ - use active-service - host_name {nagios_hostname} - service_description {nagios_hostname}[{shortname}] """ - """{description} - check_command check_nrpe!{command} - servicegroups {nagios_servicegroup} -}} -""") - - def __init__(self, shortname, description, check_cmd): - super(Check, self).__init__() - # XXX: could be better to calculate this from the service name - if not re.match(self.shortname_re, shortname): - raise CheckException("shortname must match {}".format( - Check.shortname_re)) - self.shortname = shortname - self.command = "check_{}".format(shortname) - # Note: a set of invalid characters is defined by the - # Nagios server config - # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= - self.description = description - self.check_cmd = self._locate_cmd(check_cmd) - - def _get_check_filename(self): - return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) - - def _get_service_filename(self, hostname): - return os.path.join(NRPE.nagios_exportdir, - 'service__{}_{}.cfg'.format(hostname, self.command)) - - def _locate_cmd(self, check_cmd): - search_path = ( - '/usr/lib/nagios/plugins', - '/usr/local/lib/nagios/plugins', - ) - parts = shlex.split(check_cmd) - for path in search_path: - if os.path.exists(os.path.join(path, parts[0])): - command = os.path.join(path, parts[0]) - if len(parts) > 1: - command += " " + " ".join(parts[1:]) - return command - log('Check command not found: {}'.format(parts[0])) - return '' - - def _remove_service_files(self): - if not os.path.exists(NRPE.nagios_exportdir): - return - for f in os.listdir(NRPE.nagios_exportdir): - if f.endswith('_{}.cfg'.format(self.command)): - os.remove(os.path.join(NRPE.nagios_exportdir, f)) - - def remove(self, hostname): - nrpe_check_file = self._get_check_filename() - if os.path.exists(nrpe_check_file): - os.remove(nrpe_check_file) - self._remove_service_files() - - def write(self, nagios_context, hostname, nagios_servicegroups): - nrpe_check_file = self._get_check_filename() - with open(nrpe_check_file, 'w') as nrpe_check_config: - nrpe_check_config.write("# check {}\n".format(self.shortname)) - nrpe_check_config.write("command[{}]={}\n".format( - self.command, self.check_cmd)) - - if not os.path.exists(NRPE.nagios_exportdir): - log('Not writing service config as {} is not accessible'.format( - NRPE.nagios_exportdir)) - else: - self.write_service_config(nagios_context, hostname, - nagios_servicegroups) - - def write_service_config(self, nagios_context, hostname, - nagios_servicegroups): - self._remove_service_files() - - templ_vars = { - 'nagios_hostname': hostname, - 'nagios_servicegroup': nagios_servicegroups, - 'description': self.description, - 'shortname': self.shortname, - 'command': self.command, - } - nrpe_service_text = Check.service_template.format(**templ_vars) - nrpe_service_file = self._get_service_filename(hostname) - with open(nrpe_service_file, 'w') as nrpe_service_config: - nrpe_service_config.write(str(nrpe_service_text)) - - def run(self): - subprocess.call(self.check_cmd) - - -class NRPE(object): - nagios_logdir = '/var/log/nagios' - nagios_exportdir = '/var/lib/nagios/export' - nrpe_confdir 
= '/etc/nagios/nrpe.d' - - def __init__(self, hostname=None): - super(NRPE, self).__init__() - self.config = config() - self.nagios_context = self.config['nagios_context'] - if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: - self.nagios_servicegroups = self.config['nagios_servicegroups'] - else: - self.nagios_servicegroups = self.nagios_context - self.unit_name = local_unit().replace('/', '-') - if hostname: - self.hostname = hostname - else: - nagios_hostname = get_nagios_hostname() - if nagios_hostname: - self.hostname = nagios_hostname - else: - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) - self.checks = [] - - def add_check(self, *args, **kwargs): - self.checks.append(Check(*args, **kwargs)) - - def remove_check(self, *args, **kwargs): - if kwargs.get('shortname') is None: - raise ValueError('shortname of check must be specified') - - # Use sensible defaults if they're not specified - these are not - # actually used during removal, but they're required for constructing - # the Check object; check_disk is chosen because it's part of the - # nagios-plugins-basic package. - if kwargs.get('check_cmd') is None: - kwargs['check_cmd'] = 'check_disk' - if kwargs.get('description') is None: - kwargs['description'] = '' - - check = Check(*args, **kwargs) - check.remove(self.hostname) - - def write(self): - try: - nagios_uid = pwd.getpwnam('nagios').pw_uid - nagios_gid = grp.getgrnam('nagios').gr_gid - except: - log("Nagios user not set up, nrpe checks not updated") - return - - if not os.path.exists(NRPE.nagios_logdir): - os.mkdir(NRPE.nagios_logdir) - os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) - - nrpe_monitors = {} - monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} - for nrpecheck in self.checks: - nrpecheck.write(self.nagios_context, self.hostname, - self.nagios_servicegroups) - nrpe_monitors[nrpecheck.shortname] = { - "command": nrpecheck.command, - } - - service('restart', 'nagios-nrpe-server') - - monitor_ids = relation_ids("local-monitors") + \ - relation_ids("nrpe-external-master") - for rid in monitor_ids: - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) - - -def get_nagios_hostcontext(relation_name='nrpe-external-master'): - """ - Query relation with nrpe subordinate, return the nagios_host_context - - :param str relation_name: Name of relation nrpe sub joined to - """ - for rel in relations_of_type(relation_name): - if 'nagios_host_context' in rel: - return rel['nagios_host_context'] - - -def get_nagios_hostname(relation_name='nrpe-external-master'): - """ - Query relation with nrpe subordinate, return the nagios_hostname - - :param str relation_name: Name of relation nrpe sub joined to - """ - for rel in relations_of_type(relation_name): - if 'nagios_hostname' in rel: - return rel['nagios_hostname'] - - -def get_nagios_unit_name(relation_name='nrpe-external-master'): - """ - Return the nagios unit name prepended with host_context if needed - - :param str relation_name: Name of relation nrpe sub joined to - """ - host_context = get_nagios_hostcontext(relation_name) - if host_context: - unit = "%s:%s" % (host_context, local_unit()) - else: - unit = local_unit() - return unit - - -def add_init_service_checks(nrpe, services, unit_name): - """ - Add checks for each service in list - - :param NRPE nrpe: NRPE object to add check to - :param list services: List of services to check - :param str unit_name: Unit name to use in check description - """ - for svc in services: - upstart_init = 
'/etc/init/%s.conf' % svc - sysv_init = '/etc/init.d/%s' % svc - if os.path.exists(upstart_init): - # Don't add a check for these services from neutron-gateway - if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_upstart_job %s' % svc - ) - elif os.path.exists(sysv_init): - cronpath = '/etc/cron.d/nagios-service-check-%s' % svc - cron_file = ('*/5 * * * * root ' - '/usr/local/lib/nagios/plugins/check_exit_status.pl ' - '-s /etc/init.d/%s status > ' - '/var/lib/nagios/service-check-%s.txt\n' % (svc, - svc) - ) - f = open(cronpath, 'w') - f.write(cron_file) - f.close() - nrpe.add_check( - shortname=svc, - description='process check {%s}' % unit_name, - check_cmd='check_status_file.py -f ' - '/var/lib/nagios/service-check-%s.txt' % svc, - ) - - -def copy_nrpe_checks(): - """ - Copy the nrpe checks into place - - """ - NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' - nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', - 'charmhelpers', 'contrib', 'openstack', - 'files') - - if not os.path.exists(NAGIOS_PLUGINS): - os.makedirs(NAGIOS_PLUGINS) - for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): - if os.path.isfile(fname): - shutil.copy2(fname, - os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) - - -def add_haproxy_checks(nrpe, unit_name): - """ - Add checks for each service in list - - :param NRPE nrpe: NRPE object to add check to - :param str unit_name: Unit name to use in check description - """ - nrpe.add_check( - shortname='haproxy_servers', - description='Check HAProxy {%s}' % unit_name, - check_cmd='check_haproxy.sh') - nrpe.add_check( - shortname='haproxy_queue', - description='Check HAProxy queue depth {%s}' % unit_name, - check_cmd='check_haproxy_queue_depth.sh') diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py deleted file mode 100644 index 320961b..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/charmsupport/volumes.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -''' -Functions for managing volumes in juju units. One volume is supported per unit. -Subordinates may have their own storage, provided it is on its own partition. - -Configuration stanzas:: - - volume-ephemeral: - type: boolean - default: true - description: > - If false, a volume is mounted as sepecified in "volume-map" - If true, ephemeral storage will be used, meaning that log data - will only exist as long as the machine. YOU HAVE BEEN WARNED. 
- volume-map: - type: string - default: {} - description: > - YAML map of units to device names, e.g: - "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" - Service units will raise a configure-error if volume-ephemeral - is 'true' and no volume-map value is set. Use 'juju set' to set a - value and 'juju resolved' to complete configuration. - -Usage:: - - from charmsupport.volumes import configure_volume, VolumeConfigurationError - from charmsupport.hookenv import log, ERROR - def post_mount_hook(): - stop_service('myservice') - def post_mount_hook(): - start_service('myservice') - - if __name__ == '__main__': - try: - configure_volume(before_change=pre_mount_hook, - after_change=post_mount_hook) - except VolumeConfigurationError: - log('Storage could not be configured', ERROR) - -''' - -# XXX: Known limitations -# - fstab is neither consulted nor updated - -import os -from charmhelpers.core import hookenv -from charmhelpers.core import host -import yaml - - -MOUNT_BASE = '/srv/juju/volumes' - - -class VolumeConfigurationError(Exception): - '''Volume configuration data is missing or invalid''' - pass - - -def get_config(): - '''Gather and sanity-check volume configuration data''' - volume_config = {} - config = hookenv.config() - - errors = False - - if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): - volume_config['ephemeral'] = True - else: - volume_config['ephemeral'] = False - - try: - volume_map = yaml.safe_load(config.get('volume-map', '{}')) - except yaml.YAMLError as e: - hookenv.log("Error parsing YAML volume-map: {}".format(e), - hookenv.ERROR) - errors = True - if volume_map is None: - # probably an empty string - volume_map = {} - elif not isinstance(volume_map, dict): - hookenv.log("Volume-map should be a dictionary, not {}".format( - type(volume_map))) - errors = True - - volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) - if volume_config['device'] and volume_config['ephemeral']: - # asked for ephemeral storage but also defined a volume ID - hookenv.log('A volume is defined for this unit, but ephemeral ' - 'storage was requested', hookenv.ERROR) - errors = True - elif not volume_config['device'] and not volume_config['ephemeral']: - # asked for permanent storage but did not define volume ID - hookenv.log('Ephemeral storage was requested, but there is no volume ' - 'defined for this unit.', hookenv.ERROR) - errors = True - - unit_mount_name = hookenv.local_unit().replace('/', '-') - volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) - - if errors: - return None - return volume_config - - -def mount_volume(config): - if os.path.exists(config['mountpoint']): - if not os.path.isdir(config['mountpoint']): - hookenv.log('Not a directory: {}'.format(config['mountpoint'])) - raise VolumeConfigurationError() - else: - host.mkdir(config['mountpoint']) - if os.path.ismount(config['mountpoint']): - unmount_volume(config) - if not host.mount(config['device'], config['mountpoint'], persist=True): - raise VolumeConfigurationError() - - -def unmount_volume(config): - if os.path.ismount(config['mountpoint']): - if not host.umount(config['mountpoint'], persist=True): - raise VolumeConfigurationError() - - -def managed_mounts(): - '''List of all mounted managed volumes''' - return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) - - -def configure_volume(before_change=lambda: None, after_change=lambda: None): - '''Set up storage (or don't) according to the charm's volume configuration. 
- Returns the mount point or "ephemeral". before_change and after_change - are optional functions to be called if the volume configuration changes. - ''' - - config = get_config() - if not config: - hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) - raise VolumeConfigurationError() - - if config['ephemeral']: - if os.path.ismount(config['mountpoint']): - before_change() - unmount_volume(config) - after_change() - return 'ephemeral' - else: - # persistent storage - if os.path.ismount(config['mountpoint']): - mounts = dict(managed_mounts()) - if mounts.get(config['mountpoint']) != config['device']: - before_change() - unmount_volume(config) - mount_volume(config) - after_change() - else: - before_change() - mount_volume(config) - after_change() - return config['mountpoint'] diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py deleted file mode 100644 index b65d963..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/network/ufw.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -""" -This module contains helpers to add and remove ufw rules. 
- -Examples: - -- open SSH port for subnet 10.0.3.0/24: - - >>> from charmhelpers.contrib.network import ufw - >>> ufw.enable() - >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp') - -- open service by name as defined in /etc/services: - - >>> from charmhelpers.contrib.network import ufw - >>> ufw.enable() - >>> ufw.service('ssh', 'open') - -- close service by port number: - - >>> from charmhelpers.contrib.network import ufw - >>> ufw.enable() - >>> ufw.service('4949', 'close') # munin -""" -import re -import os -import subprocess - -from charmhelpers.core import hookenv -from charmhelpers.core.kernel import modprobe, is_module_loaded - -__author__ = "Felipe Reyes <felipe.reyes@canonical.com>" - - -class UFWError(Exception): - pass - - -class UFWIPv6Error(UFWError): - pass - - -def is_enabled(): - """ - Check if `ufw` is enabled - - :returns: True if ufw is enabled - """ - output = subprocess.check_output(['ufw', 'status'], - universal_newlines=True, - env={'LANG': 'en_US', - 'PATH': os.environ['PATH']}) - - m = re.findall(r'^Status: active\n', output, re.M) - - return len(m) >= 1 - - -def is_ipv6_ok(soft_fail=False): - """ - Check if IPv6 support is present and ip6tables functional - - :param soft_fail: If set to True and IPv6 support is broken, then reports - that the host doesn't have IPv6 support, otherwise a - UFWIPv6Error exception is raised. - :returns: True if IPv6 is working, False otherwise - """ - - # do we have IPv6 in the machine? - if os.path.isdir('/proc/sys/net/ipv6'): - # is ip6tables kernel module loaded? - if not is_module_loaded('ip6_tables'): - # ip6tables support isn't complete, let's try to load it - try: - modprobe('ip6_tables') - # great, we can load the module - return True - except subprocess.CalledProcessError as ex: - hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, - level="WARN") - # we are in a world where ip6tables isn't working - if soft_fail: - # so we inform that the machine doesn't have IPv6 - return False - else: - raise UFWIPv6Error("IPv6 firewall support broken") - else: - # the module is present :) - return True - - else: - # the system doesn't have IPv6 - return False - - -def disable_ipv6(): - """ - Disable ufw IPv6 support in /etc/default/ufw - """ - exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g', - '/etc/default/ufw']) - if exit_code == 0: - hookenv.log('IPv6 support in ufw disabled', level='INFO') - else: - hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR") - raise UFWError("Couldn't disable IPv6 support in ufw") - - -def enable(soft_fail=False): - """ - Enable ufw - - :param soft_fail: If set to True silently disables IPv6 support in ufw, - otherwise a UFWIPv6Error exception is raised when IP6 - support is broken. 
- :returns: True if ufw is successfully enabled - """ - if is_enabled(): - return True - - if not is_ipv6_ok(soft_fail): - disable_ipv6() - - output = subprocess.check_output(['ufw', 'enable'], - universal_newlines=True, - env={'LANG': 'en_US', - 'PATH': os.environ['PATH']}) - - m = re.findall('^Firewall is active and enabled on system startup\n', - output, re.M) - hookenv.log(output, level='DEBUG') - - if len(m) == 0: - hookenv.log("ufw couldn't be enabled", level='WARN') - return False - else: - hookenv.log("ufw enabled", level='INFO') - return True - - -def disable(): - """ - Disable ufw - - :returns: True if ufw is successfully disabled - """ - if not is_enabled(): - return True - - output = subprocess.check_output(['ufw', 'disable'], - universal_newlines=True, - env={'LANG': 'en_US', - 'PATH': os.environ['PATH']}) - - m = re.findall(r'^Firewall stopped and disabled on system startup\n', - output, re.M) - hookenv.log(output, level='DEBUG') - - if len(m) == 0: - hookenv.log("ufw couldn't be disabled", level='WARN') - return False - else: - hookenv.log("ufw disabled", level='INFO') - return True - - -def default_policy(policy='deny', direction='incoming'): - """ - Changes the default policy for traffic `direction` - - :param policy: allow, deny or reject - :param direction: traffic direction, possible values: incoming, outgoing, - routed - """ - if policy not in ['allow', 'deny', 'reject']: - raise UFWError(('Unknown policy %s, valid values: ' - 'allow, deny, reject') % policy) - - if direction not in ['incoming', 'outgoing', 'routed']: - raise UFWError(('Unknown direction %s, valid values: ' - 'incoming, outgoing, routed') % direction) - - output = subprocess.check_output(['ufw', 'default', policy, direction], - universal_newlines=True, - env={'LANG': 'en_US', - 'PATH': os.environ['PATH']}) - hookenv.log(output, level='DEBUG') - - m = re.findall("^Default %s policy changed to '%s'\n" % (direction, - policy), - output, re.M) - if len(m) == 0: - hookenv.log("ufw couldn't change the default policy to %s for %s" - % (policy, direction), level='WARN') - return False - else: - hookenv.log("ufw default policy for %s changed to %s" - % (direction, policy), level='INFO') - return True - - -def modify_access(src, dst='any', port=None, proto=None, action='allow', - index=None): - """ - Grant access to an address or subnet - - :param src: address (e.g. 192.168.1.234) or subnet - (e.g. 192.168.1.0/24). - :param dst: destiny of the connection, if the machine has multiple IPs and - connections to only one of those have to accepted this is the - field has to be set. - :param port: destiny port - :param proto: protocol (tcp or udp) - :param action: `allow` or `delete` - :param index: if different from None the rule is inserted at the given - `index`. 
- """ - if not is_enabled(): - hookenv.log('ufw is disabled, skipping modify_access()', level='WARN') - return - - if action == 'delete': - cmd = ['ufw', 'delete', 'allow'] - elif index is not None: - cmd = ['ufw', 'insert', str(index), action] - else: - cmd = ['ufw', action] - - if src is not None: - cmd += ['from', src] - - if dst is not None: - cmd += ['to', dst] - - if port is not None: - cmd += ['port', str(port)] - - if proto is not None: - cmd += ['proto', proto] - - hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG') - p = subprocess.Popen(cmd, stdout=subprocess.PIPE) - (stdout, stderr) = p.communicate() - - hookenv.log(stdout, level='INFO') - - if p.returncode != 0: - hookenv.log(stderr, level='ERROR') - hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd), - p.returncode), - level='ERROR') - - -def grant_access(src, dst='any', port=None, proto=None, index=None): - """ - Grant access to an address or subnet - - :param src: address (e.g. 192.168.1.234) or subnet - (e.g. 192.168.1.0/24). - :param dst: destiny of the connection, if the machine has multiple IPs and - connections to only one of those have to accepted this is the - field has to be set. - :param port: destiny port - :param proto: protocol (tcp or udp) - :param index: if different from None the rule is inserted at the given - `index`. - """ - return modify_access(src, dst=dst, port=port, proto=proto, action='allow', - index=index) - - -def revoke_access(src, dst='any', port=None, proto=None): - """ - Revoke access to an address or subnet - - :param src: address (e.g. 192.168.1.234) or subnet - (e.g. 192.168.1.0/24). - :param dst: destiny of the connection, if the machine has multiple IPs and - connections to only one of those have to accepted this is the - field has to be set. - :param port: destiny port - :param proto: protocol (tcp or udp) - """ - return modify_access(src, dst=dst, port=port, proto=proto, action='delete') - - -def service(name, action): - """ - Open/close access to a service - - :param name: could be a service name defined in `/etc/services` or a port - number. - :param action: `open` or `close` - """ - if action == 'open': - subprocess.check_output(['ufw', 'allow', str(name)], - universal_newlines=True) - elif action == 'close': - subprocess.check_output(['ufw', 'delete', 'allow', str(name)], - universal_newlines=True) - else: - raise UFWError(("'{}' not supported, use 'allow' " - "or 'delete'").format(action)) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py b/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py deleted file mode 100644 index c5efb16..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/contrib/templating/jinja.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -""" -Templating using the python-jinja2 package. -""" -import six -from charmhelpers.fetch import apt_install, apt_update -try: - import jinja2 -except ImportError: - apt_update(fatal=True) - if six.PY3: - apt_install(["python3-jinja2"], fatal=True) - else: - apt_install(["python-jinja2"], fatal=True) - import jinja2 - - -DEFAULT_TEMPLATES_DIR = 'templates' - - -def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR): - templates = jinja2.Environment( - loader=jinja2.FileSystemLoader(template_dir)) - template = templates.get_template(template_name) - return template.render(context) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py b/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py deleted file mode 100644 index 0303c3f..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/coordinator.py +++ /dev/null @@ -1,607 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. -''' -The coordinator module allows you to use Juju's leadership feature to -coordinate operations between units of a service. - -Behavior is defined in subclasses of coordinator.BaseCoordinator. -One implementation is provided (coordinator.Serial), which allows an -operation to be run on a single unit at a time, on a first come, first -served basis. You can trivially define more complex behavior by -subclassing BaseCoordinator or Serial. - -:author: Stuart Bishop <stuart.bishop@canonical.com> - - -Services Framework Usage -======================== - -Ensure a peers relation is defined in metadata.yaml. Instantiate a -BaseCoordinator subclass before invoking ServiceManager.manage(). -Ensure that ServiceManager.manage() is wired up to the leader-elected, -leader-settings-changed, peers relation-changed and peers -relation-departed hooks in addition to any other hooks you need, or your -service will deadlock. 
- -Ensure calls to acquire() are guarded, so that locks are only requested -when they are really needed (and thus hooks only triggered when necessary). -Failing to do this and calling acquire() unconditionally will put your unit -into a hook loop. Calls to granted() do not need to be guarded. - -For example:: - - from charmhelpers.core import hookenv, services - from charmhelpers import coordinator - - def maybe_restart(servicename): - serial = coordinator.Serial() - if needs_restart(): - serial.acquire('restart') - if serial.granted('restart'): - hookenv.service_restart(servicename) - - services = [dict(service='servicename', - data_ready=[maybe_restart])] - - if __name__ == '__main__': - _ = coordinator.Serial() # Must instantiate before manager.manage() - manager = services.ServiceManager(services) - manager.manage() - - -You can implement a similar pattern using a decorator. If the lock has -not been granted, an attempt to acquire() it will be made if the guard -function returns True. If the lock has been granted, the decorated function -is run as normal:: - - from charmhelpers.core import hookenv, services - from charmhelpers import coordinator - - serial = coordinator.Serial() # Global, instatiated on module import. - - def needs_restart(): - [ ... Introspect state. Return True if restart is needed ... ] - - @serial.require('restart', needs_restart) - def maybe_restart(servicename): - hookenv.service_restart(servicename) - - services = [dict(service='servicename', - data_ready=[maybe_restart])] - - if __name__ == '__main__': - manager = services.ServiceManager(services) - manager.manage() - - -Traditional Usage -================= - -Ensure a peers relation is defined in metadata.yaml. - -If you are using charmhelpers.core.hookenv.Hooks, ensure that a -BaseCoordinator subclass is instantiated before calling Hooks.execute. - -If you are not using charmhelpers.core.hookenv.Hooks, ensure -that a BaseCoordinator subclass is instantiated and its handle() -method called at the start of all your hooks. - -For example:: - - import sys - from charmhelpers.core import hookenv - from charmhelpers import coordinator - - hooks = hookenv.Hooks() - - def maybe_restart(): - serial = coordinator.Serial() - if serial.granted('restart'): - hookenv.service_restart('myservice') - - @hooks.hook - def config_changed(): - update_config() - serial = coordinator.Serial() - if needs_restart(): - serial.acquire('restart'): - maybe_restart() - - # Cluster hooks must be wired up. - @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') - def cluster_relation_changed(): - maybe_restart() - - # Leader hooks must be wired up. - @hooks.hook('leader-elected', 'leader-settings-changed') - def leader_settings_changed(): - maybe_restart() - - [ ... repeat for *all* other hooks you are using ... ] - - if __name__ == '__main__': - _ = coordinator.Serial() # Must instantiate before execute() - hooks.execute(sys.argv) - - -You can also use the require decorator. If the lock has not been granted, -an attempt to acquire() it will be made if the guard function returns True. -If the lock has been granted, the decorated function is run as normal:: - - from charmhelpers.core import hookenv - - hooks = hookenv.Hooks() - serial = coordinator.Serial() # Must instantiate before execute() - - @require('restart', needs_restart) - def maybe_restart(): - hookenv.service_restart('myservice') - - @hooks.hook('install', 'config-changed', 'upgrade-charm', - # Peers and leader hooks must be wired up. 
- 'cluster-relation-changed', 'cluster-relation-departed', - 'leader-elected', 'leader-settings-changed') - def default_hook(): - [...] - maybe_restart() - - if __name__ == '__main__': - hooks.execute() - - -Details -======= - -A simple API is provided similar to traditional locking APIs. A lock -may be requested using the acquire() method, and the granted() method -may be used do to check if a lock previously requested by acquire() has -been granted. It doesn't matter how many times acquire() is called in a -hook. - -Locks are released at the end of the hook they are acquired in. This may -be the current hook if the unit is leader and the lock is free. It is -more likely a future hook (probably leader-settings-changed, possibly -the peers relation-changed or departed hook, potentially any hook). - -Whenever a charm needs to perform a coordinated action it will acquire() -the lock and perform the action immediately if acquisition is -successful. It will also need to perform the same action in every other -hook if the lock has been granted. - - -Grubby Details --------------- - -Why do you need to be able to perform the same action in every hook? -If the unit is the leader, then it may be able to grant its own lock -and perform the action immediately in the source hook. If the unit is -the leader and cannot immediately grant the lock, then its only -guaranteed chance of acquiring the lock is in the peers relation-joined, -relation-changed or peers relation-departed hooks when another unit has -released it (the only channel to communicate to the leader is the peers -relation). If the unit is not the leader, then it is unlikely the lock -is granted in the source hook (a previous hook must have also made the -request for this to happen). A non-leader is notified about the lock via -leader settings. These changes may be visible in any hook, even before -the leader-settings-changed hook has been invoked. Or the requesting -unit may be promoted to leader after making a request, in which case the -lock may be granted in leader-elected or in a future peers -relation-changed or relation-departed hook. - -This could be simpler if leader-settings-changed was invoked on the -leader. We could then never grant locks except in -leader-settings-changed hooks giving one place for the operation to be -performed. Unfortunately this is not the case with Juju 1.23 leadership. - -But of course, this doesn't really matter to most people as most people -seem to prefer the Services Framework or similar reset-the-world -approaches, rather than the twisty maze of attempting to deduce what -should be done based on what hook happens to be running (which always -seems to evolve into reset-the-world anyway when the charm grows beyond -the trivial). - -I chose not to implement a callback model, where a callback was passed -to acquire to be executed when the lock is granted, because the callback -may become invalid between making the request and the lock being granted -due to an upgrade-charm being run in the interim. And it would create -restrictions, such no lambdas, callback defined at the top level of a -module, etc. Still, we could implement it on top of what is here, eg. -by adding a defer decorator that stores a pickle of itself to disk and -have BaseCoordinator unpickle and execute them when the locks are granted. 
-''' -from datetime import datetime -from functools import wraps -import json -import os.path - -from six import with_metaclass - -from charmhelpers.core import hookenv - - -# We make BaseCoordinator and subclasses singletons, so that if we -# need to spill to local storage then only a single instance does so, -# rather than having multiple instances stomp over each other. -class Singleton(type): - _instances = {} - - def __call__(cls, *args, **kwargs): - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, - **kwargs) - return cls._instances[cls] - - -class BaseCoordinator(with_metaclass(Singleton, object)): - relid = None # Peer relation-id, set by __init__ - relname = None - - grants = None # self.grants[unit][lock] == timestamp - requests = None # self.requests[unit][lock] == timestamp - - def __init__(self, relation_key='coordinator', peer_relation_name=None): - '''Instatiate a Coordinator. - - Data is stored on the peers relation and in leadership storage - under the provided relation_key. - - The peers relation is identified by peer_relation_name, and defaults - to the first one found in metadata.yaml. - ''' - # Most initialization is deferred, since invoking hook tools from - # the constructor makes testing hard. - self.key = relation_key - self.relname = peer_relation_name - hookenv.atstart(self.initialize) - - # Ensure that handle() is called, without placing that burden on - # the charm author. They still need to do this manually if they - # are not using a hook framework. - hookenv.atstart(self.handle) - - def initialize(self): - if self.requests is not None: - return # Already initialized. - - assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+' - - if self.relname is None: - self.relname = _implicit_peer_relation_name() - - relids = hookenv.relation_ids(self.relname) - if relids: - self.relid = sorted(relids)[0] - - # Load our state, from leadership, the peer relationship, and maybe - # local state as a fallback. Populates self.requests and self.grants. - self._load_state() - self._emit_state() - - # Save our state if the hook completes successfully. - hookenv.atexit(self._save_state) - - # Schedule release of granted locks for the end of the hook. - # This needs to be the last of our atexit callbacks to ensure - # it will be run first when the hook is complete, because there - # is no point mutating our state after it has been saved. - hookenv.atexit(self._release_granted) - - def acquire(self, lock): - '''Acquire the named lock, non-blocking. - - The lock may be granted immediately, or in a future hook. - - Returns True if the lock has been granted. The lock will be - automatically released at the end of the hook in which it is - granted. - - Do not mindlessly call this method, as it triggers a cascade of - hooks. For example, if you call acquire() every time in your - peers relation-changed hook you will end up with an infinite loop - of hooks. It should almost always be guarded by some condition. - ''' - unit = hookenv.local_unit() - ts = self.requests[unit].get(lock) - if not ts: - # If there is no outstanding request on the peers relation, - # create one. - self.requests.setdefault(lock, {}) - self.requests[unit][lock] = _timestamp() - self.msg('Requested {}'.format(lock)) - - # If the leader has granted the lock, yay. - if self.granted(lock): - self.msg('Acquired {}'.format(lock)) - return True - - # If the unit making the request also happens to be the - # leader, it must handle the request now. 
Even though the - # request has been stored on the peers relation, the peers - # relation-changed hook will not be triggered. - if hookenv.is_leader(): - return self.grant(lock, unit) - - return False # Can't acquire lock, yet. Maybe next hook. - - def granted(self, lock): - '''Return True if a previously requested lock has been granted''' - unit = hookenv.local_unit() - ts = self.requests[unit].get(lock) - if ts and self.grants.get(unit, {}).get(lock) == ts: - return True - return False - - def requested(self, lock): - '''Return True if we are in the queue for the lock''' - return lock in self.requests[hookenv.local_unit()] - - def request_timestamp(self, lock): - '''Return the timestamp of our outstanding request for lock, or None. - - Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute. - ''' - ts = self.requests[hookenv.local_unit()].get(lock, None) - if ts is not None: - return datetime.strptime(ts, _timestamp_format) - - def handle(self): - if not hookenv.is_leader(): - return # Only the leader can grant requests. - - self.msg('Leader handling coordinator requests') - - # Clear our grants that have been released. - for unit in self.grants.keys(): - for lock, grant_ts in list(self.grants[unit].items()): - req_ts = self.requests.get(unit, {}).get(lock) - if req_ts != grant_ts: - # The request timestamp does not match the granted - # timestamp. Several hooks on 'unit' may have run - # before the leader got a chance to make a decision, - # and 'unit' may have released its lock and attempted - # to reacquire it. This will change the timestamp, - # and we correctly revoke the old grant putting it - # to the end of the queue. - ts = datetime.strptime(self.grants[unit][lock], - _timestamp_format) - del self.grants[unit][lock] - self.released(unit, lock, ts) - - # Grant locks - for unit in self.requests.keys(): - for lock in self.requests[unit]: - self.grant(lock, unit) - - def grant(self, lock, unit): - '''Maybe grant the lock to a unit. - - The decision to grant the lock or not is made for $lock - by a corresponding method grant_$lock, which you may define - in a subclass. If no such method is defined, the default_grant - method is used. See Serial.default_grant() for details. - ''' - if not hookenv.is_leader(): - return False # Not the leader, so we cannot grant. - - # Set of units already granted the lock. - granted = set() - for u in self.grants: - if lock in self.grants[u]: - granted.add(u) - if unit in granted: - return True # Already granted. - - # Ordered list of units waiting for the lock. - reqs = set() - for u in self.requests: - if u in granted: - continue # In the granted set. Not wanted in the req list. - for l, ts in self.requests[u].items(): - if l == lock: - reqs.add((ts, u)) - queue = [t[1] for t in sorted(reqs)] - if unit not in queue: - return False # Unit has not requested the lock. - - # Locate custom logic, or fallback to the default. - grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant) - - if grant_func(lock, unit, granted, queue): - # Grant the lock. - self.msg('Leader grants {} to {}'.format(lock, unit)) - self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock] - return True - - return False - - def released(self, unit, lock, timestamp): - '''Called on the leader when it has released a lock. - - By default, does nothing but log messages. Override if you - need to perform additional housekeeping when a lock is released, - for example recording timestamps. 
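As the released() docstring above suggests, subclasses can hook lock releases for housekeeping. A small sketch that records how long each lock was held, under an invented leadership-storage key; released() only runs on the leader, so leader_set() is safe to call here::

    from datetime import datetime

    from charmhelpers import coordinator
    from charmhelpers.core import hookenv

    class TimedSerial(coordinator.Serial):
        def released(self, unit, lock, timestamp):
            # Keep the default log message, then note the hold time.
            super(TimedSerial, self).released(unit, lock, timestamp)
            held = datetime.utcnow() - timestamp
            hookenv.leader_set({'last-{}-hold'.format(lock): str(held)})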
- ''' - interval = _utcnow() - timestamp - self.msg('Leader released {} from {}, held {}'.format(lock, unit, - interval)) - - def require(self, lock, guard_func, *guard_args, **guard_kw): - """Decorate a function to be run only when a lock is acquired. - - The lock is requested if the guard function returns True. - - The decorated function is called if the lock has been granted. - """ - def decorator(f): - @wraps(f) - def wrapper(*args, **kw): - if self.granted(lock): - self.msg('Granted {}'.format(lock)) - return f(*args, **kw) - if guard_func(*guard_args, **guard_kw) and self.acquire(lock): - return f(*args, **kw) - return None - return wrapper - return decorator - - def msg(self, msg): - '''Emit a message. Override to customize log spam.''' - hookenv.log('coordinator.{} {}'.format(self._name(), msg), - level=hookenv.INFO) - - def _name(self): - return self.__class__.__name__ - - def _load_state(self): - self.msg('Loading state'.format(self._name())) - - # All responses must be stored in the leadership settings. - # The leader cannot use local state, as a different unit may - # be leader next time. Which is fine, as the leadership - # settings are always available. - self.grants = json.loads(hookenv.leader_get(self.key) or '{}') - - local_unit = hookenv.local_unit() - - # All requests must be stored on the peers relation. This is - # the only channel units have to communicate with the leader. - # Even the leader needs to store its requests here, as a - # different unit may be leader by the time the request can be - # granted. - if self.relid is None: - # The peers relation is not available. Maybe we are early in - # the units's lifecycle. Maybe this unit is standalone. - # Fallback to using local state. - self.msg('No peer relation. Loading local state') - self.requests = {local_unit: self._load_local_state()} - else: - self.requests = self._load_peer_state() - if local_unit not in self.requests: - # The peers relation has just been joined. Update any state - # loaded from our peers with our local state. - self.msg('New peer relation. Merging local state') - self.requests[local_unit] = self._load_local_state() - - def _emit_state(self): - # Emit this units lock status. - for lock in sorted(self.requests[hookenv.local_unit()].keys()): - if self.granted(lock): - self.msg('Granted {}'.format(lock)) - else: - self.msg('Waiting on {}'.format(lock)) - - def _save_state(self): - self.msg('Publishing state'.format(self._name())) - if hookenv.is_leader(): - # sort_keys to ensure stability. - raw = json.dumps(self.grants, sort_keys=True) - hookenv.leader_set({self.key: raw}) - - local_unit = hookenv.local_unit() - - if self.relid is None: - # No peers relation yet. Fallback to local state. - self.msg('No peer relation. Saving local state') - self._save_local_state(self.requests[local_unit]) - else: - # sort_keys to ensure stability. - raw = json.dumps(self.requests[local_unit], sort_keys=True) - hookenv.relation_set(self.relid, relation_settings={self.key: raw}) - - def _load_peer_state(self): - requests = {} - units = set(hookenv.related_units(self.relid)) - units.add(hookenv.local_unit()) - for unit in units: - raw = hookenv.relation_get(self.key, unit, self.relid) - if raw: - requests[unit] = json.loads(raw) - return requests - - def _local_state_filename(self): - # Include the class name. 
We allow multiple BaseCoordinator - # subclasses to be instantiated, and they are singletons, so - # this avoids conflicts (unless someone creates and uses two - # BaseCoordinator subclasses with the same class name, so don't - # do that). - return '.charmhelpers.coordinator.{}'.format(self._name()) - - def _load_local_state(self): - fn = self._local_state_filename() - if os.path.exists(fn): - with open(fn, 'r') as f: - return json.load(f) - return {} - - def _save_local_state(self, state): - fn = self._local_state_filename() - with open(fn, 'w') as f: - json.dump(state, f) - - def _release_granted(self): - # At the end of every hook, release all locks granted to - # this unit. If a hook neglects to make use of what it - # requested, it will just have to make the request again. - # Implicit release is the only way this will work, as - # if the unit is standalone there may be no future triggers - # called to do a manual release. - unit = hookenv.local_unit() - for lock in list(self.requests[unit].keys()): - if self.granted(lock): - self.msg('Released local {} lock'.format(lock)) - del self.requests[unit][lock] - - -class Serial(BaseCoordinator): - def default_grant(self, lock, unit, granted, queue): - '''Default logic to grant a lock to a unit. Unless overridden, - only one unit may hold the lock and it will be granted to the - earliest queued request. - - To define custom logic for $lock, create a subclass and - define a grant_$lock method. - - `unit` is the unit name making the request. - - `granted` is the set of units already granted the lock. It will - never include `unit`. It may be empty. - - `queue` is the list of units waiting for the lock, ordered by time - of request. It will always include `unit`, but `unit` is not - necessarily first. - - Returns True if the lock should be granted to `unit`. - ''' - return unit == queue[0] and not granted - - -def _implicit_peer_relation_name(): - md = hookenv.metadata() - assert 'peers' in md, 'No peer relations in metadata.yaml' - return sorted(md['peers'].keys())[0] - - -# A human readable, sortable UTC timestamp format. -_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ' - - -def _utcnow(): # pragma: no cover - # This wrapper exists as mocking datetime methods is problematic. - return datetime.utcnow() - - -def _timestamp(): - return _utcnow().strftime(_timestamp_format) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
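Serial.default_grant() above allows exactly one holder per lock, granted to the head of the queue. Because grant() looks for a grant_<lockname> method before falling back to default_grant(), relaxing that policy is a small subclass; the class name, lock name and concurrency limit below are illustrative only::

    from charmhelpers import coordinator

    class RollingRestart(coordinator.Serial):
        def grant_restart(self, lock, unit, granted, queue):
            # Let up to two units hold the 'restart' lock at once.
            # `granted` is the set of current holders (never including
            # `unit`); `queue` is ordered by request timestamp.
            free = 2 - len(granted)
            return free > 0 and unit in queue[:free]

A charm would instantiate RollingRestart() in place of Serial() and otherwise use acquire() and granted() exactly as shown in the module docstring.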
diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py b/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py deleted file mode 100644 index bb05620..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/decorators.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -# -# Copyright 2014 Canonical Ltd. -# -# Authors: -# Edward Hope-Morley <opentastic@gmail.com> -# - -import time - -from charmhelpers.core.hookenv import ( - log, - INFO, -) - - -def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): - """If the decorated function raises exception exc_type, allow num_retries - retry attempts before raise the exception. - """ - def _retry_on_exception_inner_1(f): - def _retry_on_exception_inner_2(*args, **kwargs): - retries = num_retries - multiplier = 1 - while True: - try: - return f(*args, **kwargs) - except exc_type: - if not retries: - raise - - delay = base_delay * multiplier - multiplier += 1 - log("Retrying '%s' %d more times (delay=%s)" % - (f.__name__, retries, delay), level=INFO) - retries -= 1 - if delay: - time.sleep(delay) - - return _retry_on_exception_inner_2 - - return _retry_on_exception_inner_1 diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/files.py b/charms/trusty/cassandra/hooks/charmhelpers/core/files.py deleted file mode 100644 index 0f12d32..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/files.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' - -import os -import subprocess - - -def sed(filename, before, after, flags='g'): - """ - Search and replaces the given pattern on filename. - - :param filename: relative or absolute file path. - :param before: expression to be replaced (see 'man sed') - :param after: expression to replace with (see 'man sed') - :param flags: sed-compatible regex flags in example, to make - the search and replace case insensitive, specify ``flags="i"``. - The ``g`` flag is always specified regardless, so you do not - need to remember to include it when overriding this parameter. 
- :returns: If the sed command exit code was zero then return, - otherwise raise CalledProcessError. - """ - expression = r's/{0}/{1}/{2}'.format(before, - after, flags) - - return subprocess.check_call(["sed", "-i", "-r", "-e", - expression, - os.path.expanduser(filename)]) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py b/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py deleted file mode 100644 index 3056fba..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/fstab.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import io -import os - -__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - 
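The Fstab class above gives a small object API over /etc/fstab. A hedged sketch of adding and later dropping an entry with the instance methods shown so far; the device, mount point and options are placeholders, and the file is modified in place, so this is illustrative only. The classmethod helpers that follow immediately below wrap the same calls for one-shot use::

    from charmhelpers.core.fstab import Fstab

    fstab = Fstab()    # defaults to /etc/fstab
    entry = Fstab.Entry('/dev/vdb', '/srv/data', 'ext4', 'noatime')
    fstab.add_entry(entry)    # returns False if the device is already listed

    old = fstab.get_entry_by_attr('mountpoint', '/srv/data')
    if old:
        fstab.remove_entry(old)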
@classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py b/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py deleted file mode 100644 index 3912e6e..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/hookenv.py +++ /dev/null @@ -1,1026 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. -# -# Authors: -# Charm Helpers Developers <juju@lists.ubuntu.com> - -from __future__ import print_function -import copy -from distutils.version import LooseVersion -from functools import wraps -import glob -import os -import json -import yaml -import socket -import subprocess -import sys -import errno -import tempfile -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = str((func, args, kwargs)) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. 
- res = func(*args, **kwargs) - cache[key] = res - return res - wrapper._wrapped = func - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def __getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. - self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -@cached -def relation_id(relation_name=None, service_or_unit=None): - """The relation ID for the current or a specified relation""" - if not relation_name and not service_or_unit: - return os.environ.get('JUJU_RELATION_ID', None) - elif relation_name and service_or_unit: - service_name = service_or_unit.split('/')[0] - for relid in relation_ids(relation_name): - remote_service = remote_service_name(relid) - if remote_service == service_name: - return relid - else: - raise ValueError('Must specify neither or both of relation_name and service_or_unit') - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def service_name(): - """The name service group this unit belongs to""" - return local_unit().split('/')[0] - - -@cached -def remote_service_name(relid=None): - """The remote service name for a given relation-id (or the current relation)""" - if relid is None: - unit = remote_unit() - else: - units = 
related_units(relid) - unit = units[0] if units else None - return unit.split('/')[0] if unit else None - - -def hook_name(): - """The name of the currently executing hook""" - return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) - - -class Config(dict): - """A dictionary representation of the charm's config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. - - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): - self.load_previous() - atexit(self._implicit_save) - - def load_previous(self, path=None): - """Load previous copy of config from disk. - - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - self._prev_dict = json.load(f) - for k, v in copy.deepcopy(self._prev_dict).items(): - if k not in self: - self[k] = v - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework <services.base>` - or :meth:'@hook <Hooks.hook>' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. 
- - """ - with open(self.path, 'w') as f: - json.dump(self, f) - - def _implicit_save(self): - if self.implicit_save: - self.save() - - -@cached -def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - config_cmd_line.append('--format=json') - try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - if scope is not None: - return config_data - return Config(config_data) - except ValueError: - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"], universal_newlines=True) - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - settings = relation_settings.copy() - settings.update(kwargs) - for key, value in settings.items(): - # Force value to be a string: it always should, but some call - # sites pass in things like dicts or numbers. - if value is not None: - settings[key] = "{}".format(value) - if accepts_file: - # --file was introduced in Juju 1.23.2. Use it by default if - # available, since otherwise we'll break if the relation data is - # too big. Ideally we should tell relation-set to read the data from - # stdin, but that feature is broken in 1.23.2: Bug #1454678. 
- with tempfile.NamedTemporaryFile(delete=False) as settings_file: - settings_file.write(yaml.safe_dump(settings).encode("utf-8")) - subprocess.check_call( - relation_cmd_line + ["--file", settings_file.name]) - os.remove(settings_file.name) - else: - for key, value in settings.items(): - if value is None: - relation_cmd_line.append('{}='.format(key)) - else: - relation_cmd_line.append('{}={}'.format(key, value)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -def relation_clear(r_id=None): - ''' Clears any relation data already set on relation r_id ''' - settings = relation_get(rid=r_id, - unit=local_unit()) - for setting in settings: - if setting not in ['public-address', 'private-address']: - settings[setting] = None - relation_set(relation_id=r_id, - **settings) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def peer_relation_id(): - '''Get the peers relation id if a peers relation has been joined, else None.''' - md = metadata() - section = md.get('peers') - if section: - for key in section: - relids = relation_ids(key) - if relids: - return relids[0] - return None - - -@cached -def relation_to_interface(relation_name): - """ - Given the name of a relation, return the interface that relation uses. - - :returns: The interface name, or ``None``. 
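Taken together, relation_ids(), related_units(), relation_get() and relation_set() above are the whole peer data-exchange API. A hedged sketch of a unit publishing its address and collecting its peers'; the 'cluster' relation name and the 'listen-address' key are invented for illustration::

    from charmhelpers.core import hookenv

    def publish_and_collect():
        addresses = {}
        for relid in hookenv.relation_ids('cluster'):
            hookenv.relation_set(relid, {'listen-address':
                                         hookenv.unit_private_ip()})
            for unit in hookenv.related_units(relid):
                addresses[unit] = hookenv.relation_get('listen-address',
                                                       unit=unit, rid=relid)
        return addresses

Note that relation_set() flushes the local unit's relation_get() cache on write, so a value published earlier in the same hook is visible to later reads.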
- """ - return relation_to_role_and_interface(relation_name)[1] - - -@cached -def relation_to_role_and_interface(relation_name): - """ - Given the name of a relation, return the role and the name of the interface - that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). - - :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. - """ - _metadata = metadata() - for role in ('provides', 'requires', 'peers'): - interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') - if interface: - return role, interface - return None, None - - -@cached -def role_and_interface_to_relations(role, interface_name): - """ - Given a role and interface name, return a list of relation names for the - current charm that use that interface under that role (where role is one - of ``provides``, ``requires``, or ``peers``). - - :returns: A list of relation names. - """ - _metadata = metadata() - results = [] - for relation_name, relation in _metadata.get(role, {}).items(): - if relation['interface'] == interface_name: - results.append(relation_name) - return results - - -@cached -def interface_to_relations(interface_name): - """ - Given an interface, return a list of relation names for the current - charm that use that interface. - - :returns: A list of relation names. - """ - results = [] - for role in ('provides', 'requires', 'peers'): - results.extend(role_and_interface_to_relations(role, interface_name)) - return results - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). 
If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return _ensure_ip(unit_get('public-address')) - - -def unit_private_ip(): - """Get this unit's private IP address""" - return _ensure_ip(unit_get('private-address')) - - -def _ensure_ip(addr): - """If addr is a hostname, resolve it to an IP address""" - if not addr: - return None - # We need to use socket.getaddrinfo for IPv6 support. - info = socket.getaddrinfo(addr, None) - if info is None: - # Should never happen - raise ValueError("Invalid result None from getaddinfo") - try: - return info[0][4][0] - except IndexError: - # Should never happen - raise ValueError("Invalid result {!r} from getaddinfo".format(info)) - - -@cached -def storage_get(attribute=None, storage_id=None): - """Get storage attributes""" - _args = ['storage-get', '--format=json'] - if storage_id: - _args.extend(('-s', storage_id)) - if attribute: - _args.append(attribute) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -@cached -def storage_list(storage_name=None): - """List the storage IDs for the unit""" - _args = ['storage-list', '--format=json'] - if storage_name: - _args.append(storage_name) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except OSError as e: - import errno - if e.errno == errno.ENOENT: - # storage-list does not exist - return [] - raise - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. - - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=None): - super(Hooks, self).__init__() - self._hooks = {} - - # For unknown reasons, we allow the Hooks constructor to override - # config().implicit_save. 
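`storage_list()` and `storage_get()` are thin wrappers over the Juju storage hook tools; on Juju versions without `storage-list` the helper returns an empty list. A sketch of locating attached storage, where the storage name `data` is an assumption rather than something read from this charm's metadata:

```python
from charmhelpers.core import hookenv

def data_mountpoints():
    """Return the filesystem locations of all 'data' storage instances."""
    locations = []
    for sid in hookenv.storage_list('data') or []:
        location = hookenv.storage_get('location', sid)
        if location:
            locations.append(location)
    return locations
```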
- if config_save is not None: - config().implicit_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - _run_atstart() - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - try: - self._hooks[hook_name]() - except SystemExit as x: - if x.code is None or x.code == 0: - _run_atexit() - raise - _run_atexit() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -def charm_dir(): - """Return the root directory of the current charm""" - return os.environ.get('CHARM_DIR') - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -def action_set(values): - """Sets the values to be returned after the action finishes""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """Sets the action status to failed and sets the error message. - - The results set by action_set are preserved.""" - subprocess.check_call(['action-fail', message]) - - -def action_name(): - """Get the name of the currently executing action.""" - return os.environ.get('JUJU_ACTION_NAME') - - -def action_uuid(): - """Get the UUID of the currently executing action.""" - return os.environ.get('JUJU_ACTION_UUID') - - -def action_tag(): - """Get the tag for the currently executing action.""" - return os.environ.get('JUJU_ACTION_TAG') - - -def status_set(workload_state, message): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. - - workload_state -- valid juju workload state. 
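The `action_*` helpers wrap the corresponding Juju hook tools and are intended to be called from an action entry point. A hedged sketch of a trivial action script; the `force` parameter and `outcome` result key are hypothetical:

```python
#!/usr/bin/env python3
# Sketch only: parameter and result names below are made up for illustration.
import sys
from charmhelpers.core import hookenv

def main():
    params = hookenv.action_get()          # all action parameters as a dict
    if not params.get('force'):
        hookenv.action_fail('refusing to run without force=true')
        return
    hookenv.action_set({'outcome': 'done',
                        'action': hookenv.action_name()})

if __name__ == '__main__':
    sys.exit(main())
```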
- message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state and message - - If the status-get command is not found then assume this is juju < 1.23 and - return 'unknown', "" - - """ - cmd = ['status-get', "--format=json", "--include-data"] - try: - raw_status = subprocess.check_output(cmd) - except OSError as e: - if e.errno == errno.ENOENT: - return ('unknown', "") - else: - raise - else: - status = json.loads(raw_status.decode("UTF-8")) - return (status["status"], status["message"]) - - -def translate_exc(from_exc, to_exc): - def inner_translate_exc1(f): - @wraps(f) - def inner_translate_exc2(*args, **kwargs): - try: - return f(*args, **kwargs) - except from_exc: - raise to_exc - - return inner_translate_exc2 - - return inner_translate_exc1 - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def is_leader(): - """Does the current unit hold the juju leadership - - Uses juju to determine whether the current unit is the leader of its peers - """ - cmd = ['is-leader', '--format=json'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_get(attribute=None): - """Juju leader get value(s)""" - cmd = ['leader-get', '--format=json'] + [attribute or '-'] - return json.loads(subprocess.check_output(cmd).decode('UTF-8')) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def leader_set(settings=None, **kwargs): - """Juju leader set value(s)""" - # Don't log secrets. - # log("Juju leader-set '%s'" % (settings), level=DEBUG) - cmd = ['leader-set'] - settings = settings or {} - settings.update(kwargs) - for k, v in settings.items(): - if v is None: - cmd.append('{}='.format(k)) - else: - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_register(ptype, klass, pid): - """ is used while a hook is running to let Juju know that a - payload has been started.""" - cmd = ['payload-register'] - for x in [ptype, klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_unregister(klass, pid): - """ is used while a hook is running to let Juju know - that a payload has been manually stopped. The <class> and <id> provided - must match a payload that has been previously registered with juju using - payload-register.""" - cmd = ['payload-unregister'] - for x in [klass, pid]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def payload_status_set(klass, pid, status): - """is used to update the current status of a registered payload. - The <class> and <id> provided must match a payload that has been previously - registered with juju using payload-register. 
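`leader_get()` and `leader_set()` only exist on Juju releases that ship the leadership tools; the `translate_exc` decorator turns a missing tool into `NotImplementedError`, and only the leader may write. A sketch of the usual pattern of the leader minting a value once that every unit then reads back; the key name is illustrative:

```python
from charmhelpers.core import hookenv
from charmhelpers.core.host import pwgen

def ensure_shared_secret():
    """Leader generates a secret exactly once; all units read it back."""
    if hookenv.is_leader() and not hookenv.leader_get('shared-secret'):
        hookenv.leader_set({'shared-secret': pwgen()})
    return hookenv.leader_get('shared-secret')
```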
The <status> must be one of the - follow: starting, started, stopping, stopped""" - cmd = ['payload-status-set'] - for x in [klass, pid, status]: - cmd.append(x) - subprocess.check_call(cmd) - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def resource_get(name): - """used to fetch the resource path of the given name. - - <name> must match a name of defined resource in metadata.yaml - - returns either a path or False if resource not available - """ - if not name: - return False - - cmd = ['resource-get', name] - try: - return subprocess.check_output(cmd).decode('UTF-8') - except subprocess.CalledProcessError: - return False - - -@cached -def juju_version(): - """Full version string (eg. '1.23.3.1-trusty-amd64')""" - # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 - jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] - return subprocess.check_output([jujud, 'version'], - universal_newlines=True).strip() - - -@cached -def has_juju_version(minimum_version): - """Return True if the Juju version is at least the provided version""" - return LooseVersion(juju_version()) >= LooseVersion(minimum_version) - - -_atexit = [] -_atstart = [] - - -def atstart(callback, *args, **kwargs): - '''Schedule a callback to run before the main hook. - - Callbacks are run in the order they were added. - - This is useful for modules and classes to perform initialization - and inject behavior. In particular: - - - Run common code before all of your hooks, such as logging - the hook name or interesting relation data. - - Defer object or module initialization that requires a hook - context until we know there actually is a hook context, - making testing easier. - - Rather than requiring charm authors to include boilerplate to - invoke your helper's behavior, have it run automatically if - your object is instantiated or module imported. - - This is not at all useful after your hook framework as been launched. - ''' - global _atstart - _atstart.append((callback, args, kwargs)) - - -def atexit(callback, *args, **kwargs): - '''Schedule a callback to run on successful hook completion. - - Callbacks are run in the reverse order that they were added.''' - _atexit.append((callback, args, kwargs)) - - -def _run_atstart(): - '''Hook frameworks must invoke this before running the main hook body.''' - global _atstart - for callback, args, kwargs in _atstart: - callback(*args, **kwargs) - del _atstart[:] - - -def _run_atexit(): - '''Hook frameworks must invoke this after the main hook body has - successfully completed. Do not invoke it if the hook fails.''' - global _atexit - for callback, args, kwargs in reversed(_atexit): - callback(*args, **kwargs) - del _atexit[:] - - -@translate_exc(from_exc=OSError, to_exc=NotImplementedError) -def network_get_primary_address(binding): - ''' - Retrieve the primary network address for a named binding - - :param binding: string. The name of a relation of extra-binding - :return: string. The primary IP address for the named binding - :raise: NotImplementedError if run on Juju < 2.0 - ''' - cmd = ['network-get', '--primary-address', binding] - return subprocess.check_output(cmd).strip() diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/host.py b/charms/trusty/cassandra/hooks/charmhelpers/core/host.py deleted file mode 100644 index 481087b..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/host.py +++ /dev/null @@ -1,695 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. 
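`atstart()` and `atexit()` let helper modules defer work until a hook context actually exists and schedule cleanup that only runs on success; `Hooks.execute()` above calls `_run_atstart()` and `_run_atexit()` at the appropriate moments. A small sketch of the intended usage:

```python
from charmhelpers.core import hookenv

# Registered at import time, but only executed once a hook actually runs.
hookenv.atstart(hookenv.log, 'hook starting')

def flush_caches():
    hookenv.log('hook finished cleanly, flushing caches')

# Runs only after the main hook body completes without raising.
hookenv.atexit(flush_caches)
```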
-# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Nick Moffitt <nick.moffitt@canonical.com> -# Matthew Wedgwood <matthew.wedgwood@canonical.com> - -import os -import re -import pwd -import glob -import grp -import random -import string -import subprocess -import hashlib -import functools -import itertools -from contextlib import contextmanager -from collections import OrderedDict - -import six - -from .hookenv import log -from .fstab import Fstab - - -def service_start(service_name): - """Start a system service""" - return service('start', service_name) - - -def service_stop(service_name): - """Stop a system service""" - return service('stop', service_name) - - -def service_restart(service_name): - """Restart a system service""" - return service('restart', service_name) - - -def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) - if not service_result and restart_on_failure: - service_result = service('restart', service_name) - return service_result - - -def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): - """Pause a system service. - - Stop it, and prevent it from starting again at boot.""" - stopped = True - if service_running(service_name): - stopped = service_stop(service_name) - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('disable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "disable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - return stopped - - -def service_resume(service_name, init_dir="/etc/init", - initd_dir="/etc/init.d"): - """Resume a system service. - - Reenable starting again at boot. 
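`service_reload()` falls back to a restart only when asked, and `service_pause()` both stops a service and prevents it from starting at boot, picking the systemd, Upstart or SysV mechanism automatically. A short sketch; the service name is an assumption for illustration:

```python
from charmhelpers.core import host

def quiesce(service_name='cassandra'):
    """Stop the service and keep it from starting at boot (illustrative)."""
    return host.service_pause(service_name)

def reload_or_restart(service_name='cassandra'):
    # Falls back to a full restart only because restart_on_failure=True.
    return host.service_reload(service_name, restart_on_failure=True)
```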
Start the service""" - upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) - sysv_file = os.path.join(initd_dir, service_name) - if init_is_systemd(): - service('enable', service_name) - elif os.path.exists(upstart_file): - override_path = os.path.join( - init_dir, '{}.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) - elif os.path.exists(sysv_file): - subprocess.check_call(["update-rc.d", service_name, "enable"]) - else: - raise ValueError( - "Unable to detect {0} as SystemD, Upstart {1} or" - " SysV {2}".format( - service_name, upstart_file, sysv_file)) - - started = service_running(service_name) - if not started: - started = service_start(service_name) - return started - - -def service(action, service_name): - """Control a system service""" - if init_is_systemd(): - cmd = ['systemctl', action, service_name] - else: - cmd = ['service', service_name, action] - return subprocess.call(cmd) == 0 - - -def service_running(service_name): - """Determine whether a system service is running""" - if init_is_systemd(): - return service('is-active', service_name) - else: - try: - output = subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - if ("start/running" in output or "is running" in output or - "up and running" in output): - return True - else: - return False - - -def service_available(service_name): - """Determine whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -SYSTEMD_SYSTEM = '/run/systemd/system' - - -def init_is_systemd(): - """Return True if the host system uses systemd, False otherwise.""" - return os.path.isdir(SYSTEMD_SYSTEM) - - -def adduser(username, password=None, shell='/bin/bash', system_user=False, - primary_group=None, secondary_groups=None): - """Add a user to the system. - - Will log but otherwise succeed if the user already exists. 
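`service_available()` and `service_running()` answer different questions: whether the init system knows the service at all, and whether it is currently active. A hedged sketch of combining them before restarting, in the spirit of the wrappers that appear later in this tree:

```python
from charmhelpers.core import host

def safe_restart(service_name):
    """Restart only services the init system actually knows about."""
    if not host.service_available(service_name):
        return False
    if host.service_running(service_name):
        return host.service_restart(service_name)
    return host.service_start(service_name)
```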
- - :param str username: Username to create - :param str password: Password for user; if ``None``, create a system user - :param str shell: The default shell for the user - :param bool system_user: Whether to create a login or system user - :param str primary_group: Primary group for user; defaults to username - :param list secondary_groups: Optional list of additional groups - - :returns: The password database entry struct, as returned by `pwd.getpwnam` - """ - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - if not primary_group: - try: - grp.getgrnam(username) - primary_group = username # avoid "group exists" error - except KeyError: - pass - if primary_group: - cmd.extend(['-g', primary_group]) - if secondary_groups: - cmd.extend(['-G', ','.join(secondary_groups)]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def user_exists(username): - """Check if a user exists""" - try: - pwd.getpwnam(username) - user_exists = True - except KeyError: - user_exists = False - return user_exists - - -def add_group(group_name, system_group=False): - """Add a group to the system""" - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - except KeyError: - log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = ['gpasswd', '-a', username, group] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def rsync(from_path, to_path, flags='-r', options=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ - 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not os.path.isdir(realpath): - log("Removing non-directory file {} prior to mkdir()".format(path)) - os.unlink(realpath) - os.makedirs(realpath, perms) - elif not path_exists: - os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) - os.chmod(realpath, perms) - - -def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - 
os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) - - -def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab""" - return Fstab.remove_by_mountpoint(mp) - - -def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file""" - return Fstab.add(dev, mp, fs, options=options) - - -def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): - """Mount a filesystem at a particular mountpoint""" - cmd_args = ['mount'] - if options is not None: - cmd_args.extend(['-o', options]) - cmd_args.extend([device, mountpoint]) - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) - return False - - if persist: - return fstab_add(device, mountpoint, filesystem, options=options) - return True - - -def umount(mountpoint, persist=False): - """Unmount a filesystem""" - cmd_args = ['umount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - - if persist: - return fstab_remove(mountpoint) - return True - - -def mounts(): - """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" - with open('/proc/mounts') as f: - # [['/mount/point','/dev/path'],[...]] - system_mounts = [m[1::-1] for m in [l.strip().split() - for l in f.readlines()]] - return system_mounts - - -def fstab_mount(mountpoint): - """Mount filesystem using fstab""" - cmd_args = ['mount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - return True - - -def file_hash(path, hash_type='md5'): - """Generate a hash checksum of the contents of 'path' or None if not found. - - :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def path_hash(path): - """Generate a hash checksum of all files matching 'path'. Standard - wildcards like '*' and '?' are supported, see documentation for the 'glob' - module for more information. - - :return: dict: A { filename: hash } dictionary for all matched files. - Empty if none found. - """ - return { - filename: file_hash(filename) - for filename in glob.iglob(path) - } - - -def check_hash(path, checksum, hash_type='md5'): - """Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
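`file_hash()` returns `None` for a missing file, and `check_hash()` raises `ChecksumError` (defined just below) on any mismatch, so verifying a download is a one-liner. A sketch, with an obviously placeholder digest:

```python
from charmhelpers.core import host

def verify_download(path, expected_sha256):
    """Raise ChecksumError if 'path' does not match the expected digest."""
    # expected_sha256 would normally come from config or a release manifest.
    host.check_hash(path, expected_sha256, hash_type='sha256')

# Example (the digest below is a placeholder, not a real value):
# verify_download('/tmp/apache-cassandra.tar.gz', '0000...')
```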
- :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - """A class derived from Value error to indicate the checksum failed.""" - pass - - -def restart_on_change(restart_map, stopstart=False): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - '/etc/apache/sites-enabled/*': [ 'apache2' ] - }) - def config_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. The apache2 service would be - restarted if any file matching the pattern got changed, created - or removed. Standard wildcards are supported, see documentation - for the 'glob' module for more information. - - @param restart_map: {path_file_name: [service_name, ...] - @param stopstart: DEFAULT false; whether to stop, start OR restart - @returns result from decorated function - """ - def wrap(f): - @functools.wraps(f) - def wrapped_f(*args, **kwargs): - return restart_on_change_helper( - (lambda: f(*args, **kwargs)), restart_map, stopstart) - return wrapped_f - return wrap - - -def restart_on_change_helper(lambda_f, restart_map, stopstart=False): - """Helper function to perform the restart_on_change function. - - This is provided for decorators to restart services if files described - in the restart_map have changed after an invocation of lambda_f(). - - @param lambda_f: function to call. - @param restart_map: {file: [service, ...]} - @param stopstart: whether to stop, start or restart a service - @returns result of lambda_f() - """ - checksums = {path: path_hash(path) for path in restart_map} - r = lambda_f() - # create a list of lists of the services to restart - restarts = [restart_map[path] - for path in restart_map - if path_hash(path) != checksums[path]] - # create a flat list of ordered services without duplicates from lists - services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts))) - if services_list: - actions = ('stop', 'start') if stopstart else ('restart',) - for action in actions: - for service_name in services_list: - service(action, service_name) - return r - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def pwgen(length=None): - """Generate a random pasword.""" - if length is None: - # A random length is ok to use a weak PRNG - length = random.choice(range(35, 45)) - alphanumeric_chars = [ - l for l in (string.ascii_letters + string.digits) - if l not in 'l0QD1vAEIOUaeiou'] - # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the - # actual password - random_generator = random.SystemRandom() - random_chars = [ - random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) - - -def is_phy_iface(interface): - """Returns True if interface is not virtual, otherwise False.""" - if interface: - sys_net = '/sys/class/net' - if os.path.isdir(sys_net): - for iface in glob.glob(os.path.join(sys_net, '*')): - if '/virtual/' in os.path.realpath(iface): - continue - - if interface == os.path.basename(iface): - return True - - return False - - -def get_bond_master(interface): - """Returns bond master if interface is bond slave otherwise None. - - NOTE: the provided interface is expected to be physical - """ - if interface: - iface_path = '/sys/class/net/%s' % (interface) - if os.path.exists(iface_path): - if '/virtual/' in os.path.realpath(iface_path): - return None - - master = os.path.join(iface_path, 'master') - if os.path.exists(master): - master = os.path.realpath(master) - # make sure it is a bond master - if os.path.exists(os.path.join(master, 'bonding')): - return os.path.basename(master) - - return None - - -def list_nics(nic_type=None): - """Return a list of nics of given type(s)""" - if isinstance(nic_type, six.string_types): - int_types = [nic_type] - else: - int_types = nic_type - - interfaces = [] - if nic_type: - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - ip_output = ip_output.split('\n') - ip_output = (line for line in ip_output if line) - for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + - r'[0-9]+\.[0-9]+)@.*', line) - if matched: - iface = matched.groups()[0] - else: - iface = line.split()[1].replace(":", "") - - if iface not in interfaces: - interfaces.append(iface) - else: - cmd = ['ip', 'a'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line.strip() for line in ip_output if line) - - key = re.compile('^[0-9]+:\s+(.+):') - for line in ip_output: - matched = re.search(key, line) - if matched: - iface = matched.group(1) - iface = iface.partition("@")[0] - if iface not in interfaces: - interfaces.append(iface) - - return interfaces - - -def set_nic_mtu(nic, mtu): - """Set the Maximum Transmission Unit (MTU) on a network interface.""" - cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] - subprocess.check_call(cmd) - - -def get_nic_mtu(nic): - """Return the Maximum Transmission Unit (MTU) for a network interface.""" - cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - mtu = "" - for line in ip_output: - words = line.split() - if 'mtu' in words: - mtu = words[words.index("mtu") + 1] - return mtu - - -def get_nic_hwaddr(nic): - """Return the Media Access Control (MAC) for a network interface.""" - cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - hwaddr = "" - words = ip_output.split() - if 'link/ether' in words: - hwaddr = words[words.index('link/ether') + 1] - return hwaddr - - -def cmp_pkgrevno(package, revno, pkgcache=None): - """Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache 
argument is None. Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - """ - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - -@contextmanager -def chdir(directory): - """Change the current working directory to a different directory for a code - block and return the previous directory after the block exits. Useful to - run commands from a specificed directory. - - :param str directory: The directory path to change to for this context. - """ - cur = os.getcwd() - try: - yield os.chdir(directory) - finally: - os.chdir(cur) - - -def chownr(path, owner, group, follow_links=True, chowntopdir=False): - """Recursively change user and group ownership of files and directories - in given path. Doesn't chown path itself by default, only its children. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - :param bool follow_links: Also Chown links if True - :param bool chowntopdir: Also chown path itself if True - """ - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - if follow_links: - chown = os.chown - else: - chown = os.lchown - - if chowntopdir: - broken_symlink = os.path.lexists(path) and not os.path.exists(path) - if not broken_symlink: - chown(path, uid, gid) - for root, dirs, files in os.walk(path): - for name in dirs + files: - full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: - chown(full, uid, gid) - - -def lchownr(path, owner, group): - """Recursively change user and group ownership of files and directories - in a given path, not following symbolic links. See the documentation for - 'os.lchown' for more information. - - :param str path: The string path to start changing ownership. - :param str owner: The owner string to use when looking up the uid. - :param str group: The group string to use when looking up the gid. - """ - chownr(path, owner, group, follow_links=False) - - -def get_total_ram(): - """The total amount of system RAM in bytes. - - This is what is reported by the OS, and may be overcommitted when - there are multiple containers hosted on the same machine. - """ - with open('/proc/meminfo', 'r') as f: - for line in f.readlines(): - if line: - key, value, unit = line.split() - if key == 'MemTotal:': - assert unit == 'kB', 'Unknown unit' - return int(value) * 1024 # Classic, not KiB. - raise NotImplementedError() diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py b/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py deleted file mode 100644 index a783ad9..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/hugepage.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
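`cmp_pkgrevno()` compares the installed package version against a target using apt's own ordering and returns 1, 0 or -1, which makes version gates read naturally; `get_total_ram()` reports what the OS sees, which may be overcommitted inside containers. A sketch, with an illustrative package name and version:

```python
from charmhelpers.core.host import cmp_pkgrevno, get_total_ram

# Only meaningful on a unit where the package is actually installed.
if cmp_pkgrevno('cassandra', '2.1.0') >= 0:
    supports_incremental_repair = True

# Size a heap from reported RAM (may be overcommitted in a container).
half_ram_mb = get_total_ram() // (2 * 1024 * 1024)
```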
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import yaml -from charmhelpers.core import fstab -from charmhelpers.core import sysctl -from charmhelpers.core.host import ( - add_group, - add_user_to_group, - fstab_mount, - mkdir, -) -from charmhelpers.core.strutils import bytes_from_string -from subprocess import check_output - - -def hugepage_support(user, group='hugetlb', nr_hugepages=256, - max_map_count=65536, mnt_point='/run/hugepages/kvm', - pagesize='2MB', mount=True, set_shmmax=False): - """Enable hugepages on system. - - Args: - user (str) -- Username to allow access to hugepages to - group (str) -- Group name to own hugepages - nr_hugepages (int) -- Number of pages to reserve - max_map_count (int) -- Number of Virtual Memory Areas a process can own - mnt_point (str) -- Directory to mount hugepages on - pagesize (str) -- Size of hugepages - mount (bool) -- Whether to Mount hugepages - """ - group_info = add_group(group) - gid = group_info.gr_gid - add_user_to_group(user, group) - if max_map_count < 2 * nr_hugepages: - max_map_count = 2 * nr_hugepages - sysctl_settings = { - 'vm.nr_hugepages': nr_hugepages, - 'vm.max_map_count': max_map_count, - 'vm.hugetlb_shm_group': gid, - } - if set_shmmax: - shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) - shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages - if shmmax_minsize > shmmax_current: - sysctl_settings['kernel.shmmax'] = shmmax_minsize - sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') - mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) - lfstab = fstab.Fstab() - fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) - if fstab_entry: - lfstab.remove_entry(fstab_entry) - entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', - 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) - lfstab.add_entry(entry) - if mount: - fstab_mount(mnt_point) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py b/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py deleted file mode 100644 index 5dc6495..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/kernel.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
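`hugepage_support()` wires together group creation, the sysctl settings, an fstab entry and (optionally) the mount in a single call. A minimal sketch of invoking it; the user name and page count are assumptions, not values from this charm:

```python
from charmhelpers.core.hugepage import hugepage_support

# Reserve 1024 2MB pages for the 'cassandra' user and mount them;
# kernel.shmmax is left alone because set_shmmax defaults to False.
hugepage_support('cassandra', group='hugetlb', nr_hugepages=1024,
                 mnt_point='/run/hugepages/kvm', pagesize='2MB', mount=True)
```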
- -__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" - -from charmhelpers.core.hookenv import ( - log, - INFO -) - -from subprocess import check_call, check_output -import re - - -def modprobe(module, persist=True): - """Load a kernel module and configure for auto-load on reboot.""" - cmd = ['modprobe', module] - - log('Loading kernel module %s' % module, level=INFO) - - check_call(cmd) - if persist: - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - -def rmmod(module, force=False): - """Remove a module from the linux kernel""" - cmd = ['rmmod'] - if force: - cmd.append('-f') - cmd.append(module) - log('Removing kernel module %s' % module, level=INFO) - return check_call(cmd) - - -def lsmod(): - """Shows what kernel modules are currently loaded""" - return check_output(['lsmod'], - universal_newlines=True) - - -def is_module_loaded(module): - """Checks if a kernel module is already loaded""" - matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) - return len(matches) > 0 - - -def update_initramfs(version='all'): - """Updates an initramfs image""" - return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py deleted file mode 100644 index 0928158..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/services/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -from .base import * # NOQA -from .helpers import * # NOQA diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py deleted file mode 100644 index a42660c..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/services/base.py +++ /dev/null @@ -1,353 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
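The kernel helpers are thin wrappers over `modprobe`, `rmmod` and `lsmod`; `modprobe(..., persist=True)` also appends the module to `/etc/modules` so it survives a reboot. A short sketch with an illustrative module name:

```python
from charmhelpers.core.kernel import modprobe, is_module_loaded

if not is_module_loaded('nf_conntrack'):
    # Loads the module now and persists it across reboots.
    modprobe('nf_conntrack', persist=True)
```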
- -import os -import json -from inspect import getargspec -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": <service name>, - "required_data": <list of required data contexts>, - "provided_data": <list of provided data contexts>, - "data_ready": <one or more callbacks>, - "data_lost": <one or more callbacks>, - "start": <one or more callbacks>, - "stop": <one or more callbacks>, - "ports": <list of ports to manage>, - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. - - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. - - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. 
- - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - """ - Handle the current hook by doing The Right Thing with the registered services. - """ - hookenv._run_atstart() - try: - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.reconfigure_services() - self.provide_data() - except SystemExit as x: - if x.code is None or x.code == 0: - hookenv._run_atexit() - hookenv._run_atexit() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - - The `provide_data()` method can optionally accept two parameters: - - * ``remote_service`` The name of the remote service that the data will - be provided to. The `provide_data()` method will be called once - for each connected service (not unit). This allows the method to - tailor its data to the given service. - * ``service_ready`` Whether or not the service definition had all of - its requirements met, and thus the ``data_ready`` callbacks run. - - Note that the ``provided_data`` methods are now called **after** the - ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks - a chance to generate any data necessary for the providing to the remote - services. - """ - for service_name, service in self.services.items(): - service_ready = self.is_ready(service_name) - for provider in service.get('provided_data', []): - for relid in hookenv.relation_ids(provider.name): - units = hookenv.related_units(relid) - if not units: - continue - remote_service = units[0].split('/')[0] - argspec = getargspec(provider.provide_data) - if len(argspec.args) > 1: - data = provider.provide_data(remote_service, service_ready) - else: - data = provider.provide_data() - if data: - hookenv.relation_set(relid, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. 
- """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. - """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. - """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. - """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. 
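`ManagerCallback` exists so a callback can see the whole `ServiceManager`, not just the service name; `PortManagerCallback` here and `TemplateCallback` later in this tree are the stock implementations. A hedged sketch of a custom one:

```python
from charmhelpers.core import hookenv
from charmhelpers.core.services.base import ManagerCallback

class LogReadiness(ManagerCallback):
    """Toy callback: log whether the service's required_data was satisfied."""
    def __call__(self, manager, service_name, event_name):
        ready = manager.is_ready(service_name)
        hookenv.log('{} on {}: ready={}'.format(event_name, service_name,
                                                ready))

# Used like any other callback in a service definition, e.g.
#   {'service': 'spadesd', 'data_ready': LogReadiness()}
```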
- """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - new_ports = service.get('ports', []) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - if event_name == 'start': - hookenv.open_port(port) - elif event_name == 'stop': - hookenv.close_port(port) - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py b/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py deleted file mode 100644 index 2423704..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/services/helpers.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import os -import yaml - -from charmhelpers.core import hookenv -from charmhelpers.core import host -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). - - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. 
- - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. - - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. - """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. 
- - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. - Otherwise, populate with the given data and persist it to the file. - """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. 
- - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to (or None) - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - :param partial on_change_action: functools partial to be executed when - rendered file changes - :param jinja2 loader template_loader: A jinja2 template loader - - :return str: The rendered template - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444, - on_change_action=None, template_loader=None): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - self.on_change_action = on_change_action - self.template_loader = template_loader - - def __call__(self, manager, service_name, event_name): - pre_checksum = '' - if self.on_change_action and os.path.isfile(self.target): - pre_checksum = host.file_hash(self.target) - service = manager.get_service(service_name) - context = {'ctx': {}} - for ctx in service.get('required_data', []): - context.update(ctx) - context['ctx'].update(ctx) - - result = templating.render(self.source, self.target, context, - self.owner, self.group, self.perms, - template_loader=self.template_loader) - if self.on_change_action: - if pre_checksum == host.file_hash(self.target): - hookenv.log( - 'No change detected: {}'.format(self.target), - hookenv.DEBUG) - else: - self.on_change_action() - - return result - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py b/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py deleted file mode 100644 index 7e3f969..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/strutils.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import six -import re - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. - """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) - - -def bytes_from_string(value): - """Interpret human readable string value as bytes. 
- - Returns int - """ - BYTE_POWER = { - 'K': 1, - 'KB': 1, - 'M': 2, - 'MB': 2, - 'G': 3, - 'GB': 3, - 'T': 4, - 'TB': 4, - 'P': 5, - 'PB': 5, - } - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - matches = re.match("([0-9]+)([a-zA-Z]+)", value) - if not matches: - msg = "Unable to interpret string value '%s' as bytes" % (value) - raise ValueError(msg) - return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py b/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py deleted file mode 100644 index 21cc8ab..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/sysctl.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py b/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py deleted file mode 100644 index d2d8eaf..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/templating.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
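For orientation, here is a minimal usage sketch of the two strutils helpers deleted above; both are pure functions, so the snippet runs anywhere the charmhelpers tree is importable, and the example values are illustrative only:

    from charmhelpers.core.strutils import bool_from_string, bytes_from_string

    # bool_from_string() accepts y/yes/true/t/on and n/no/false/f/off,
    # case-insensitively; anything else raises ValueError.
    assert bool_from_string('Yes') is True
    assert bool_from_string('off') is False

    # bytes_from_string() requires a unit suffix (K/KB/M/MB/G/GB/T/TB/P/PB);
    # a bare number such as '1024' raises ValueError.
    assert bytes_from_string('1G') == 1024 ** 3
    assert bytes_from_string('512MB') == 512 * 1024 ** 2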
- -import os - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. It can also be `None`, in which - case no file will be written. - - The context should be a dict containing the values to be replaced in the - template. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - The rendered template will be written to the file as well as being returned - as a string. - - Note: Using this requires python-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. - """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - apt_install('python-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if template_loader: - template_env = Environment(loader=template_loader) - else: - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - template_env = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = template_env.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' % - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - if target is not None: - target_dir = os.path.dirname(target) - if not os.path.exists(target_dir): - # This is a terrible default directory permission, as the file - # or its siblings will often contain secrets. - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) - return content diff --git a/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py b/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py deleted file mode 100644 index 338104e..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/core/unitdata.py +++ /dev/null @@ -1,521 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. -# -# -# Authors: -# Kapil Thangavelu <kapil.foss@gmail.com> -# -""" -Intro ------ - -A simple way to store state in units. 
This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. - -Here's a fully worked integration example using hookenv.Hooks:: - - from charmhelper.core import hookenv, unitdata - - hook_data = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # Print all changes to configuration from previously seen - # values. - for changed, (prev, cur) in hook_data.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - # Directly access all charm config as a mapping. - conf = db.getrange('config', True) - - # Directly access all relation data as a mapping - rels = db.getrange('rels', True) - - if __name__ == '__main__': - with hook_data(): - hook.execute() - - -A more basic integration is via the hook_scope context manager which simply -manages transaction scope (and records hook name, and timestamp):: - - >>> from unitdata import kv - >>> db = kv() - >>> with db.hook_scope('install'): - ... # do work, in transactional scope. - ... db.set('x', 1) - >>> db.get('x') - 1 - - -Usage ------ - -Values are automatically json de/serialized to preserve basic typing -and complex data struct capabilities (dicts, lists, ints, booleans, etc). - -Individual values can be manipulated via get/set:: - - >>> kv.set('y', True) - >>> kv.get('y') - True - - # We can set complex values (dicts, lists) as a single key. - >>> kv.set('config', {'a': 1, 'b': True'}) - - # Also supports returning dictionaries as a record which - # provides attribute access. - >>> config = kv.get('config', record=True) - >>> config.b - True - - -Groups of keys can be manipulated with update/getrange:: - - >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") - >>> kv.getrange('gui.', strip=True) - {'z': 1, 'y': 2} - -When updating values, its very helpful to understand which values -have actually changed and how have they changed. The storage -provides a delta method to provide for this:: - - >>> data = {'debug': True, 'option': 2} - >>> delta = kv.delta(data, 'config.') - >>> delta.debug.previous - None - >>> delta.debug.current - True - >>> delta - {'debug': (None, True), 'option': (None, 2)} - -Note the delta method does not persist the actual change, it needs to -be explicitly saved via 'update' method:: - - >>> kv.update(data, 'config.') - -Values modified in the context of a hook scope retain historical values -associated to the hookname. - - >>> with db.hook_scope('config-changed'): - ... 
db.set('x', 42) - >>> db.gethistory('x') - [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), - (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] - -""" - -import collections -import contextlib -import datetime -import itertools -import json -import os -import pprint -import sqlite3 -import sys - -__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>' - - -class Storage(object): - """Simple key value database for local unit state within charms. - - Modifications are not persisted unless :meth:`flush` is called. - - To support dicts, lists, integer, floats, and booleans values - are automatically json encoded/decoded. - """ - def __init__(self, path=None): - self.db_path = path - if path is None: - if 'UNIT_STATE_DB' in os.environ: - self.db_path = os.environ['UNIT_STATE_DB'] - else: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def get(self, key, default=None, record=False): - self.cursor.execute('select data from kv where key=?', [key]) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - """ - Get a range of keys starting with a common prefix as a mapping of - keys to values. - - :param str key_prefix: Common prefix among all keys - :param bool strip: Optionally strip the common prefix from the key - names in the returned dict - :return dict: A (possibly empty) dict of key-value mappings - """ - self.cursor.execute("select key, data from kv where key like ?", - ['%s%%' % key_prefix]) - result = self.cursor.fetchall() - - if not result: - return {} - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - """ - Set the values of multiple keys at once. - - :param dict mapping: Mapping of keys to values - :param str prefix: Optional prefix to apply to all keys in `mapping` - before setting - """ - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - """ - Remove a key from the database entirely. - """ - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def unsetrange(self, keys=None, prefix=""): - """ - Remove a range of keys starting with a common prefix, from the database - entirely. - - :param list keys: List of keys to remove. - :param str prefix: Optional prefix to apply to all keys in ``keys`` - before removing. 
- """ - if keys is not None: - keys = ['%s%s' % (prefix, key) for key in keys] - self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), - list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) - else: - self.cursor.execute('delete from kv where key like ?', - ['%s%%' % prefix]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - """ - Set a value in the database. - - :param str key: Key to set the value for - :param value: Any JSON-serializable value to be set - """ - serialized = json.dumps(value) - - self.cursor.execute('select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? - where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. 
- """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. - - Records all unit information, and stores deltas for processing - by the hook. - - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. 
- # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. - charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py deleted file mode 100644 index 6dfe7ed..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/__init__.py +++ /dev/null @@ -1,468 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
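To make the unitdata API above concrete, here is a condensed sketch of how a hook typically drives it; the key names ('seeds', 'bootstrapped') are placeholders for illustration, not keys used by this charm:

    from charmhelpers.core import unitdata

    db = unitdata.kv()  # singleton Storage backed by $CHARM_DIR/.unit-state.db

    # Writes made inside a hook scope are committed only if the block exits
    # cleanly; on an exception the transaction is rolled back.
    with db.hook_scope('config-changed'):
        db.set('seeds', ['10.0.0.1', '10.0.0.2'])
        if not db.get('bootstrapped', default=False):
            db.set('bootstrapped', True)

    # Values are JSON round-tripped, so lists and dicts come back intact.
    print(db.get('seeds'))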
- -import importlib -from tempfile import NamedTemporaryFile -import time -from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess -from charmhelpers.core.hookenv import ( - config, - log, -) -import os - -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse - - -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', - # Liberty - 'liberty': 'trusty-updates/liberty', - 'trusty-liberty': 'trusty-updates/liberty', - 'trusty-liberty/updates': 'trusty-updates/liberty', - 'trusty-updates/liberty': 'trusty-updates/liberty', - 'liberty/proposed': 'trusty-proposed/liberty', - 'trusty-liberty/proposed': 'trusty-proposed/liberty', - 'trusty-proposed/liberty': 'trusty-proposed/liberty', - # Mitaka - 'mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka': 'trusty-updates/mitaka', - 'trusty-mitaka/updates': 'trusty-updates/mitaka', - 'trusty-updates/mitaka': 'trusty-updates/mitaka', - 'mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', - 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', -} - -# The order of this list is very important. 
Handlers should be listed in from -# least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', -) - -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - - -class SourceConfigError(Exception): - pass - - -class UnhandledSource(Exception): - pass - - -class AptLockError(Exception): - pass - - -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_cache(in_memory=True): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache() - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes', '--force-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_mark(packages, mark, fatal=False): - """Flag one or more packages using apt-mark""" - log("Marking {} as {}".format(packages, mark)) - cmd = ['apt-mark', mark] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - - if fatal: - subprocess.check_call(cmd, 
universal_newlines=True) - else: - subprocess.call(cmd, universal_newlines=True) - - -def apt_hold(packages, fatal=False): - return apt_mark(packages, 'hold', fatal=fatal) - - -def apt_unhold(packages, fatal=False): - return apt_mark(packages, 'unhold', fatal=fatal) - - -def add_source(source, key=None): - """Add a package source to this system. - - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - elif 'http://' in key: - with NamedTemporaryFile('w+') as key_file: - subprocess.check_call(['wget', key, '-O-'], stdout=key_file) - subprocess.check_call(['apt-key', 'add', key_file.name]) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) - - -def configure_sources(update=False, - sources_var='install_sources', - keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. - - The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their - corresponding keys are of the types supported by add_source(). - - Example config: - install_sources: | - - "ppa:foo" - - "http://example.com/repo precise main" - install_keys: | - - null - - "a1b2c3d4" - - Note that 'null' (a.k.a. None) should not be quoted. 
- """ - sources = safe_load((config(sources_var) or '').strip()) or [] - keys = safe_load((config(keys_var) or '').strip()) or None - - if isinstance(sources, six.string_types): - sources = [sources] - - if keys is None: - for source in sources: - add_source(source, None) - else: - if isinstance(keys, six.string_types): - keys = [keys] - - if len(sources) != len(keys): - raise SourceConfigError( - 'Install sources and keys lists are different lengths') - for source, key in zip(sources, keys): - add_source(source, key) - if update: - apt_update(fatal=True) - - -def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source - - The specified source should be a url of the form: - scheme://[host]/path[#[option=value][&...]] - - Schemes supported are based on this modules submodules. - Options supported are submodule-specific. - Additional arguments are passed through to the submodule. - - For example:: - - dest = install_remote('http://example.com/archive.tgz', - checksum='deadbeef', - hash_type='sha1') - - This will download `archive.tgz`, validate it using SHA1 and, if - the file is ok, extract it and return the directory in which it - was extracted. If the checksum fails, it will raise - :class:`charmhelpers.core.host.ChecksumError`. - """ - # We ONLY check for True here because can_handle may return a string - # explaining why it can't handle a given source. - handlers = [h for h in plugins() if h.can_handle(source) is True] - installed_to = None - for handler in handlers: - try: - installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource as e: - log('Install source attempt unsuccessful: {}'.format(e), - level='WARNING') - if not installed_to: - raise UnhandledSource("No handler found for source {}".format(source)) - return installed_to - - -def install_from_config(config_var_name): - charm_config = config() - source = charm_config[config_var_name] - return install_remote(source) - - -def plugins(fetch_handlers=None): - if not fetch_handlers: - fetch_handlers = FETCH_HANDLERS - plugin_list = [] - for handler_name in fetch_handlers: - package, classname = handler_name.rsplit('.', 1) - try: - handler_class = getattr( - importlib.import_module(package), - classname) - plugin_list.append(handler_class()) - except NotImplementedError: - # Skip missing plugins so that they can be ommitted from - # installation if desired - log("FetchHandler {} not found, skipping plugin".format( - handler_name)) - return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." 
- "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py deleted file mode 100644 index b8e0943..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/archiveurl.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import os -import hashlib -import re - -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs - - -def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' - _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: - return match.group(1, 2) - return None, host - - -def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' - _passwdprog = re.compile('^([^:]*):(.*)$', re.S) - match = _passwdprog.match(user) - if match: - return match.group(1, 2) - return user, None - - -class ArchiveUrlFetchHandler(BaseFetchHandler): - """ - Handler to download archive files from arbitrary URLs. - - Can fetch from http, https, ftp, and file URLs. - - Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. - - Installs the contents of the archive in $CHARM_DIR/fetched/. - """ - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): - # XXX: Why is this returning a boolean and a string? It's - # doomed to fail since "bool(can_handle('foo://'))" will be True. - return "Wrong source type" - if get_archive_handler(self.base_url(source)): - return True - return False - - def download(self, source, dest): - """ - Download an archive file. - - :param str source: URL pointing to an archive file. - :param str dest: Local path location to download archive file to. 
- """ - # propogate all exceptions - # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse(source) - if proto in ('http', 'https'): - auth, barehost = splituser(netloc) - if auth is not None: - source = urlunparse((proto, barehost, path, params, query, fragment)) - username, password = splitpasswd(auth) - passman = HTTPPasswordMgrWithDefaultRealm() - # Realm is set to None in add_password to force the username and password - # to be used whatever the realm - passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) - try: - with open(dest, 'wb') as dest_file: - dest_file.write(response.read()) - except Exception as e: - if os.path.isfile(dest): - os.unlink(dest) - raise e - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - tempfile, headers = urlretrieve(url) - check_hash(tempfile, hashsum, validate) - return tempfile - - def install(self, source, dest=None, checksum=None, hash_type='sha1'): - """ - Download and install an archive file, with optional checksum validation. - - The checksum can also be given on the `source` URL's fragment. - For example:: - - handler.install('http://example.com/file.tgz#sha1=deadbeef') - - :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. - :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - - """ - url_parts = self.parse_url(source) - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) - try: - self.download(source, dld_file) - except URLError as e: - raise UnhandledSource(e.reason) - except OSError as e: - raise UnhandledSource(e.strerror) - options = parse_qs(url_parts.fragment) - for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available - if key in algorithms: - if len(value) != 1: - raise TypeError( - "Expected 1 hash value, not %d" % len(value)) - expected = value[0] - check_hash(dld_file, expected, key) - if checksum: - check_hash(dld_file, checksum, hash_type) - return extract(dld_file, dest) diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py deleted file mode 100644 index cafd27f..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/bzrurl.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. - -import os -from subprocess import check_call -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - apt_install, -) -from charmhelpers.core.host import mkdir - - -if filter_installed_packages(['bzr']) != []: - apt_install(['bzr']) - if filter_installed_packages(['bzr']) != []: - raise NotImplementedError('Unable to install bzr') - - -class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.bzr')) - else: - return True - - def branch(self, source, dest): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - if os.path.exists(dest): - check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) - else: - check_call(['bzr', 'branch', source, dest]) - - def install(self, source, dest=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - try: - self.branch(source, dest_dir) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py b/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py deleted file mode 100644 index 65ed531..0000000 --- a/charms/trusty/cassandra/hooks/charmhelpers/fetch/giturl.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. 
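Tying the fetch pieces together, the sketch below shows how the machinery defined in charmhelpers/fetch/__init__.py above is normally driven; the URL and checksum are placeholder values taken from the install_remote() docstring, not sources used by this charm:

    from charmhelpers.fetch import install_remote, UnhandledSource

    try:
        # plugins() instantiates each handler in FETCH_HANDLERS; handlers whose
        # can_handle() returns True are tried in order, and the first successful
        # install() wins. An http(s) archive URL is picked up by
        # ArchiveUrlFetchHandler, downloaded into $CHARM_DIR/fetched/ and
        # verified against the given checksum before extraction.
        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef', hash_type='sha1')
    except UnhandledSource as exc:
        print('no handler could fetch the source: {}'.format(exc))
    else:
        print('unpacked to {}'.format(dest))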
- -import os -from subprocess import check_call, CalledProcessError -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource, - filter_installed_packages, - apt_install, -) - -if filter_installed_packages(['git']) != []: - apt_install(['git']) - if filter_installed_packages(['git']) != []: - raise NotImplementedError('Unable to install git') - - -class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs""" - def can_handle(self, source): - url_parts = self.parse_url(source) - # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git', ''): - return False - elif not url_parts.scheme: - return os.path.exists(os.path.join(source, '.git')) - else: - return True - - def clone(self, source, dest, branch="master", depth=None): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - - if os.path.exists(dest): - cmd = ['git', '-C', dest, 'pull', source, branch] - else: - cmd = ['git', 'clone', source, dest, '--branch', branch] - if depth: - cmd.extend(['--depth', depth]) - check_call(cmd) - - def install(self, source, branch="master", dest=None, depth=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - try: - self.clone(source, dest_dir, branch, depth) - except CalledProcessError as e: - raise UnhandledSource(e) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/charms/trusty/cassandra/hooks/cluster-relation-changed b/charms/trusty/cassandra/hooks/cluster-relation-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/cluster-relation-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/cluster-relation-departed b/charms/trusty/cassandra/hooks/cluster-relation-departed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/cluster-relation-departed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/config-changed b/charms/trusty/cassandra/hooks/config-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/config-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/coordinator.py b/charms/trusty/cassandra/hooks/coordinator.py deleted file mode 100644 index c353671..0000000 --- a/charms/trusty/cassandra/hooks/coordinator.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -from charmhelpers.coordinator import BaseCoordinator - - -class CassandraCoordinator(BaseCoordinator): - def default_grant(self, lock, unit, granted, queue): - '''Grant locks to only one unit at a time, regardless of its name. - - This lets us keep separate locks like repair and restart, - while ensuring the operations do not occur on different nodes - at the same time. - ''' - # Return True if this unit has already been granted a lock. - if self.grants.get(unit): - return True - - # Otherwise, return True if the unit is first in the queue. - return queue[0] == unit and not granted - - -coordinator = CassandraCoordinator() diff --git a/charms/trusty/cassandra/hooks/data-relation-changed b/charms/trusty/cassandra/hooks/data-relation-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/data-relation-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. 
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/data-relation-departed b/charms/trusty/cassandra/hooks/data-relation-departed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/data-relation-departed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/database-admin-relation-changed b/charms/trusty/cassandra/hooks/database-admin-relation-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/database-admin-relation-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/database-relation-changed b/charms/trusty/cassandra/hooks/database-relation-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/database-relation-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/definitions.py b/charms/trusty/cassandra/hooks/definitions.py deleted file mode 100644 index 24f9497..0000000 --- a/charms/trusty/cassandra/hooks/definitions.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -from charmhelpers.core import hookenv -from charmhelpers.core import services - -import actions -import helpers -import relations - - -def get_service_definitions(): - # This looks like it could be a module level global list, but - # unfortunately that makes the module unimportable outside of a - # hook context. The main culprit is RelationContext, which invokes - # relation-get from its constructor. By wrapping the service - # definition list in this function, we can defer constructing it - # until we have constructed enough of a mock context and perform - # basic tests. - config = hookenv.config() - - return [ - # Prepare for the Cassandra service. - dict(service='install', - data_ready=[actions.set_proxy, - actions.preinstall, - actions.emit_meminfo, - actions.revert_unchangeable_config, - actions.store_unit_private_ip, - actions.add_implicit_package_signing_keys, - actions.configure_sources, - actions.swapoff, - actions.reset_sysctl, - actions.reset_limits, - actions.install_oracle_jre, - actions.install_cassandra_packages, - actions.emit_java_version, - actions.ensure_cassandra_package_status], - start=[], stop=[]), - - # Get Cassandra running. - dict(service=helpers.get_cassandra_service(), - - # Open access to client and replication ports. Client - # protocols require password authentication. Access to - # the unauthenticated replication ports is protected via - # ufw firewall rules. We do not open the JMX port, although - # we could since it is similarly protected by ufw. - ports=[config['rpc_port'], # Thrift clients - config['native_transport_port'], # Native clients. - config['storage_port'], # Plaintext replication - config['ssl_storage_port']], # Encrypted replication. - - required_data=[relations.StorageRelation(), - relations.PeerRelation()], - provided_data=[relations.StorageRelation()], - data_ready=[actions.configure_firewall, - actions.update_etc_hosts, - actions.maintain_seeds, - actions.configure_cassandra_yaml, - actions.configure_cassandra_env, - actions.configure_cassandra_rackdc, - actions.reset_all_io_schedulers, - actions.maybe_restart, - actions.request_unit_superuser, - actions.reset_default_password], - start=[services.open_ports], - stop=[actions.stop_cassandra, services.close_ports]), - - # Actions that must be done while Cassandra is running. 
- dict(service='post', - required_data=[RequiresLiveNode()], - data_ready=[actions.post_bootstrap, - actions.create_unit_superusers, - actions.reset_auth_keyspace_replication, - actions.publish_database_relations, - actions.publish_database_admin_relations, - actions.install_maintenance_crontab, - actions.nrpe_external_master_relation, - actions.emit_cluster_info, - actions.set_active], - start=[], stop=[])] - - -class RequiresLiveNode: - def __bool__(self): - is_live = self.is_live() - hookenv.log('Requirement RequiresLiveNode: {}'.format(is_live), - hookenv.DEBUG) - return is_live - - def is_live(self): - if helpers.is_decommissioned(): - hookenv.log('Node is decommissioned') - return False - - if helpers.is_cassandra_running(): - hookenv.log('Cassandra is running') - auth = hookenv.config()['authenticator'] - if auth == 'AllowAllAuthenticator': - return True - elif hookenv.local_unit() in helpers.get_unit_superusers(): - hookenv.log('Credentials created') - return True - else: - hookenv.log('Credentials have not been created') - return False - else: - hookenv.log('Cassandra is not running') - return False - - -def get_service_manager(): - return services.ServiceManager(get_service_definitions()) diff --git a/charms/trusty/cassandra/hooks/helpers.py b/charms/trusty/cassandra/hooks/helpers.py deleted file mode 100644 index b86a6b1..0000000 --- a/charms/trusty/cassandra/hooks/helpers.py +++ /dev/null @@ -1,1084 +0,0 @@ -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import configparser -from contextlib import contextmanager -from datetime import timedelta -from distutils.version import LooseVersion -import errno -from functools import wraps -import io -import json -import os.path -import re -import shutil -import subprocess -import sys -import tempfile -from textwrap import dedent -import time - -import bcrypt -from cassandra import ConsistencyLevel -import cassandra.auth -import cassandra.cluster -import cassandra.query -import yaml - -from charmhelpers.core import hookenv, host -from charmhelpers.core.hookenv import DEBUG, ERROR, WARNING -from charmhelpers import fetch - -from coordinator import coordinator - - -RESTART_TIMEOUT = 600 - - -def logged(func): - @wraps(func) - def wrapper(*args, **kw): - hookenv.log("* Helper {}/{}".format(hookenv.hook_name(), - func.__name__)) - return func(*args, **kw) - return wrapper - - -def backoff(what_for, max_pause=60): - i = 0 - while True: - yield True - i += 1 - pause = min(max_pause, 2 ** i) - time.sleep(pause) - if pause > 10: - hookenv.log('Recheck {} for {}'.format(i, what_for)) - - -# FOR CHARMHELPERS -@contextmanager -def autostart_disabled(services=None, _policy_rc='/usr/sbin/policy-rc.d'): - '''Tell well behaved Debian packages to not start services when installed. 
- ''' - script = ['#!/bin/sh'] - if services is not None: - for service in services: - script.append( - 'if [ "$1" = "{}" ]; then exit 101; fi'.format(service)) - script.append('exit 0') - else: - script.append('exit 101') # By default, all daemons disabled. - try: - if os.path.exists(_policy_rc): - shutil.move(_policy_rc, "{}-orig".format(_policy_rc)) - host.write_file(_policy_rc, '\n'.join(script).encode('ASCII'), - perms=0o555) - yield - finally: - os.unlink(_policy_rc) - if os.path.exists("{}-orig".format(_policy_rc)): - shutil.move("{}-orig".format(_policy_rc), _policy_rc) - - -# FOR CHARMHELPERS -@logged -def install_packages(packages): - packages = list(packages) - if hookenv.config('extra_packages'): - packages.extend(hookenv.config('extra_packages').split()) - packages = fetch.filter_installed_packages(packages) - if packages: - # The DSE packages are huge, so this might take some time. - status_set('maintenance', 'Installing packages') - with autostart_disabled(['cassandra']): - fetch.apt_install(packages, fatal=True) - - -# FOR CHARMHELPERS -@logged -def ensure_package_status(packages): - config_dict = hookenv.config() - - package_status = config_dict['package_status'] - - if package_status not in ['install', 'hold']: - raise RuntimeError("package_status must be 'install' or 'hold', " - "not {!r}".format(package_status)) - - selections = [] - for package in packages: - selections.append('{} {}\n'.format(package, package_status)) - dpkg = subprocess.Popen(['dpkg', '--set-selections'], - stdin=subprocess.PIPE) - dpkg.communicate(input=''.join(selections).encode('US-ASCII')) - - -def get_seed_ips(): - '''Return the set of seed ip addresses. - - We use ip addresses rather than unit names, as we may need to use - external seed ips at some point. - ''' - return set((hookenv.leader_get('seeds') or '').split(',')) - - -def actual_seed_ips(): - '''Return the seeds currently in cassandra.yaml''' - cassandra_yaml = read_cassandra_yaml() - s = cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds'] - return set(s.split(',')) - - -def get_database_directory(config_path): - '''Convert a database path from the service config to an absolute path. - - Entries in the config file may be absolute, relative to - /var/lib/cassandra, or relative to the mountpoint. - ''' - import relations - storage = relations.StorageRelation() - if storage.mountpoint: - root = os.path.join(storage.mountpoint, 'cassandra') - else: - root = '/var/lib/cassandra' - return os.path.join(root, config_path) - - -def ensure_database_directory(config_path): - '''Create the database directory if it doesn't exist, resetting - ownership and other settings while we are at it. - - Returns the absolute path. - ''' - absdir = get_database_directory(config_path) - - # Work around Bug #1427150 by ensuring components of the path are - # created with the required permissions, if necessary. 
- component = os.sep - for p in absdir.split(os.sep)[1:-1]: - component = os.path.join(component, p) - if not os.path.exists(p): - host.mkdir(component) - assert component == os.path.split(absdir)[0] - host.mkdir(absdir, owner='cassandra', group='cassandra', perms=0o750) - return absdir - - -def get_all_database_directories(): - config = hookenv.config() - dirs = dict( - data_file_directories=[get_database_directory(d) - for d in (config['data_file_directories'] or - 'data').split()], - commitlog_directory=get_database_directory( - config['commitlog_directory'] or 'commitlog'), - saved_caches_directory=get_database_directory( - config['saved_caches_directory'] or 'saved_caches')) - if has_cassandra_version('3.0'): - # Not yet configurable. Make configurable with Juju native storage. - dirs['hints_directory'] = get_database_directory('hints') - return dirs - - -def mountpoint(path): - '''Return the mountpoint that path exists on.''' - path = os.path.realpath(path) - while path != '/' and not os.path.ismount(path): - path = os.path.dirname(path) - return path - - -# FOR CHARMHELPERS -def is_lxc(): - '''Return True if we are running inside an LXC container.''' - with open('/proc/1/cgroup', 'r') as f: - return ':/lxc/' in f.readline() - - -# FOR CHARMHELPERS -def set_io_scheduler(io_scheduler, directory): - '''Set the block device io scheduler.''' - - assert os.path.isdir(directory) - - # The block device regex may be a tad simplistic. - block_regex = re.compile('\/dev\/([a-z]*)', re.IGNORECASE) - - output = subprocess.check_output(['df', directory], - universal_newlines=True) - - if not is_lxc(): - hookenv.log("Setting block device of {} to IO scheduler {}" - "".format(directory, io_scheduler)) - try: - block_dev = re.findall(block_regex, output)[0] - except IndexError: - hookenv.log("Unable to locate block device of {} (in container?)" - "".format(directory)) - return - sys_file = os.path.join("/", "sys", "block", block_dev, - "queue", "scheduler") - try: - host.write_file(sys_file, io_scheduler.encode('ascii'), - perms=0o644) - except OSError as e: - if e.errno == errno.EACCES: - hookenv.log("Got Permission Denied trying to set the " - "IO scheduler at {}. We may be in an LXC. " - "Exiting gracefully".format(sys_file), - WARNING) - elif e.errno == errno.ENOENT: - hookenv.log("Got no such file or directory trying to " - "set the IO scheduler at {}. It may be " - "this is an LXC, the device name is as " - "yet unknown to the charm, or LVM/RAID is " - "hiding the underlying device name. " - "Exiting gracefully".format(sys_file), - WARNING) - else: - raise e - else: - # Make no change if we are in an LXC - hookenv.log("In an LXC. Cannot set io scheduler {}" - "".format(io_scheduler)) - - -# FOR CHARMHELPERS -def recursive_chown(directory, owner="root", group="root"): - '''Change ownership of all files and directories in 'directory'. - - Ownership of 'directory' is also reset. 
- ''' - shutil.chown(directory, owner, group) - for root, dirs, files in os.walk(directory): - for dirname in dirs: - shutil.chown(os.path.join(root, dirname), owner, group) - for filename in files: - shutil.chown(os.path.join(root, filename), owner, group) - - -def maybe_backup(path): - '''Copy a file to file.orig, if file.orig does not already exist.''' - backup_path = path + '.orig' - if not os.path.exists(backup_path): - with open(path, 'rb') as f: - host.write_file(backup_path, f.read(), perms=0o600) - - -# FOR CHARMHELPERS -def get_package_version(package): - cache = fetch.apt_cache() - if package not in cache: - return None - pkgver = cache[package].current_ver - if pkgver is not None: - return pkgver.ver_str - return None - - -def get_jre(): - # DataStax Enterprise requires the Oracle JRE. - if get_cassandra_edition() == 'dse': - return 'oracle' - - config = hookenv.config() - jre = config['jre'].lower() - if jre not in ('openjdk', 'oracle'): - hookenv.log('Unknown JRE {!r} specified. Using OpenJDK'.format(jre), - ERROR) - jre = 'openjdk' - return jre - - -def get_cassandra_edition(): - config = hookenv.config() - edition = config['edition'].lower() - if edition not in ('community', 'dse'): - hookenv.log('Unknown edition {!r}. Using community.'.format(edition), - ERROR) - edition = 'community' - return edition - - -def get_cassandra_service(): - '''Cassandra upstart service''' - if get_cassandra_edition() == 'dse': - return 'dse' - return 'cassandra' - - -def get_cassandra_version(): - if get_cassandra_edition() == 'dse': - dse_ver = get_package_version('dse-full') - if not dse_ver: - return None - elif LooseVersion(dse_ver) >= LooseVersion('5.0'): - return '3.0' - elif LooseVersion(dse_ver) >= LooseVersion('4.7'): - return '2.1' - else: - return '2.0' - return get_package_version('cassandra') - - -def has_cassandra_version(minimum_ver): - cassandra_version = get_cassandra_version() - assert cassandra_version is not None, 'Cassandra package not yet installed' - return LooseVersion(cassandra_version) >= LooseVersion(minimum_ver) - - -def get_cassandra_config_dir(): - if get_cassandra_edition() == 'dse': - return '/etc/dse/cassandra' - else: - return '/etc/cassandra' - - -def get_cassandra_yaml_file(): - return os.path.join(get_cassandra_config_dir(), "cassandra.yaml") - - -def get_cassandra_env_file(): - return os.path.join(get_cassandra_config_dir(), "cassandra-env.sh") - - -def get_cassandra_rackdc_file(): - return os.path.join(get_cassandra_config_dir(), - "cassandra-rackdc.properties") - - -def get_cassandra_pid_file(): - edition = get_cassandra_edition() - if edition == 'dse': - pid_file = "/var/run/dse/dse.pid" - else: - pid_file = "/var/run/cassandra/cassandra.pid" - return pid_file - - -def get_cassandra_packages(): - edition = get_cassandra_edition() - if edition == 'dse': - packages = set(['dse-full']) - else: - packages = set(['cassandra']) # 'cassandra-tools' - - packages.add('ntp') - packages.add('run-one') - packages.add('netcat') - - jre = get_jre() - if jre == 'oracle': - # We can't use a packaged version of the Oracle JRE, as we - # are not allowed to bypass Oracle's click through license - # agreement. - pass - else: - # NB. OpenJDK 8 not available in trusty. This needs to come - # from a PPA or some other configured source. 
- packages.add('openjdk-8-jre-headless') - - return packages - - -@logged -def stop_cassandra(): - if is_cassandra_running(): - hookenv.log('Shutting down Cassandra') - host.service_stop(get_cassandra_service()) - if is_cassandra_running(): - hookenv.status_set('blocked', 'Cassandra failed to shut down') - raise SystemExit(0) - - -@logged -def start_cassandra(): - if is_cassandra_running(): - return - - actual_seeds = sorted(actual_seed_ips()) - assert actual_seeds, 'Attempting to start cassandra with empty seed list' - hookenv.config()['configured_seeds'] = actual_seeds - - if is_bootstrapped(): - status_set('maintenance', - 'Starting Cassandra with seeds {!r}' - .format(','.join(actual_seeds))) - else: - status_set('maintenance', - 'Bootstrapping with seeds {}' - .format(','.join(actual_seeds))) - - host.service_start(get_cassandra_service()) - - # Wait for Cassandra to actually start, or abort. - timeout = time.time() + RESTART_TIMEOUT - while time.time() < timeout: - if is_cassandra_running(): - return - time.sleep(1) - status_set('blocked', 'Cassandra failed to start') - raise SystemExit(0) - - -@logged -def reconfigure_and_restart_cassandra(overrides={}): - stop_cassandra() - configure_cassandra_yaml(overrides) - start_cassandra() - - -@logged -def remount_cassandra(): - '''If a new mountpoint is ready, migrate data across to it.''' - assert not is_cassandra_running() # Guard against data loss. - import relations - storage = relations.StorageRelation() - if storage.needs_remount(): - status_set('maintenance', 'Migrating data to new mountpoint') - hookenv.config()['bootstrapped_into_cluster'] = False - if storage.mountpoint is None: - hookenv.log('External storage AND DATA gone. ' - 'Reverting to local storage. ' - 'In danger of resurrecting old data. ', - WARNING) - else: - storage.migrate('/var/lib/cassandra', 'cassandra') - root = os.path.join(storage.mountpoint, 'cassandra') - os.chmod(root, 0o750) - - -@logged -def ensure_database_directories(): - '''Ensure that directories Cassandra expects to store its data in exist.''' - # Guard against changing perms on a running db. Although probably - # harmless, it causes shutil.chown() to fail. - assert not is_cassandra_running() - db_dirs = get_all_database_directories() - ensure_database_directory(db_dirs['commitlog_directory']) - ensure_database_directory(db_dirs['saved_caches_directory']) - if 'hints_directory' in db_dirs: - ensure_database_directory(db_dirs['hints_directory']) - for db_dir in db_dirs['data_file_directories']: - ensure_database_directory(db_dir) - - -CONNECT_TIMEOUT = 10 - - -@contextmanager -def connect(username=None, password=None, timeout=CONNECT_TIMEOUT, - auth_timeout=CONNECT_TIMEOUT): - # We pull the currently configured listen address and port from the - # yaml, rather than the service configuration, as it may have been - # overridden. - cassandra_yaml = read_cassandra_yaml() - address = cassandra_yaml['rpc_address'] - if address == '0.0.0.0': - address = 'localhost' - port = cassandra_yaml['native_transport_port'] - - if username is None or password is None: - username, password = superuser_credentials() - - auth = hookenv.config()['authenticator'] - if auth == 'AllowAllAuthenticator': - auth_provider = None - else: - auth_provider = cassandra.auth.PlainTextAuthProvider(username=username, - password=password) - - # Although we specify a reconnection_policy, it does not apply to - # the initial connection so we retry in a loop. 
- start = time.time() - until = start + timeout - auth_until = start + auth_timeout - while True: - cluster = cassandra.cluster.Cluster([address], port=port, - auth_provider=auth_provider) - try: - session = cluster.connect() - session.default_timeout = timeout - break - except cassandra.cluster.NoHostAvailable as x: - cluster.shutdown() - now = time.time() - # If every node failed auth, reraise one of the - # AuthenticationFailed exceptions. Unwrapping the exception - # means call sites don't have to sniff the exception bundle. - # We don't retry on auth fails; this method should not be - # called if the system_auth data is inconsistent. - auth_fails = [af for af in x.errors.values() - if isinstance(af, cassandra.AuthenticationFailed)] - if auth_fails: - if now > auth_until: - raise auth_fails[0] - if now > until: - raise - time.sleep(1) - try: - yield session - finally: - cluster.shutdown() - - -QUERY_TIMEOUT = 60 - - -def query(session, statement, consistency_level, args=None): - q = cassandra.query.SimpleStatement(statement, - consistency_level=consistency_level) - - until = time.time() + QUERY_TIMEOUT - for _ in backoff('query to execute'): - try: - return session.execute(q, args) - except Exception: - if time.time() > until: - raise - - -def encrypt_password(password): - return bcrypt.hashpw(password, bcrypt.gensalt()) - - -@logged -def ensure_user(session, username, encrypted_password, superuser=False): - '''Create the DB user if it doesn't already exist & reset the password.''' - auth = hookenv.config()['authenticator'] - if auth == 'AllowAllAuthenticator': - return # No authentication means we cannot create users - - if superuser: - hookenv.log('Creating SUPERUSER {}'.format(username)) - else: - hookenv.log('Creating user {}'.format(username)) - if has_cassandra_version('2.2'): - query(session, - 'INSERT INTO system_auth.roles ' - '(role, can_login, is_superuser, salted_hash) ' - 'VALUES (%s, TRUE, %s, %s)', - ConsistencyLevel.ALL, - (username, superuser, encrypted_password)) - else: - query(session, - 'INSERT INTO system_auth.users (name, super) VALUES (%s, %s)', - ConsistencyLevel.ALL, (username, superuser)) - query(session, - 'INSERT INTO system_auth.credentials (username, salted_hash) ' - 'VALUES (%s, %s)', - ConsistencyLevel.ALL, (username, encrypted_password)) - - -@logged -def create_unit_superuser_hard(): - '''Create or recreate the unit's superuser account. - - This method is used when there are no known superuser credentials - to use. We restart the node using the AllowAllAuthenticator and - insert our credentials directly into the system_auth keyspace. - ''' - username, password = superuser_credentials() - pwhash = encrypt_password(password) - hookenv.log('Creating unit superuser {}'.format(username)) - - # Restart cassandra without authentication & listening on localhost. - reconfigure_and_restart_cassandra( - dict(authenticator='AllowAllAuthenticator', rpc_address='localhost')) - for _ in backoff('superuser creation'): - try: - with connect() as session: - ensure_user(session, username, pwhash, superuser=True) - break - except Exception as x: - print(str(x)) - - # Restart Cassandra with regular config. - nodetool('flush') # Ensure our backdoor updates are flushed. 
- reconfigure_and_restart_cassandra() - - -def get_cqlshrc_path(): - return os.path.expanduser('~root/.cassandra/cqlshrc') - - -def superuser_username(): - return 'juju_{}'.format(re.subn(r'\W', '_', hookenv.local_unit())[0]) - - -def superuser_credentials(): - '''Return (username, password) to connect to the Cassandra superuser. - - The credentials are persisted in the root user's cqlshrc file, - making them easily accessible to the command line tools. - ''' - cqlshrc_path = get_cqlshrc_path() - cqlshrc = configparser.ConfigParser(interpolation=None) - cqlshrc.read([cqlshrc_path]) - - username = superuser_username() - - try: - section = cqlshrc['authentication'] - # If there happened to be an existing cqlshrc file, it might - # contain invalid credentials. Ignore them. - if section['username'] == username: - return section['username'], section['password'] - except KeyError: - hookenv.log('Generating superuser credentials into {}'.format( - cqlshrc_path)) - - config = hookenv.config() - - password = host.pwgen() - - hookenv.log('Generated username {}'.format(username)) - - # We set items separately, rather than together, so that we have a - # defined order for the ConfigParser to preserve and the tests to - # rely on. - cqlshrc.setdefault('authentication', {}) - cqlshrc['authentication']['username'] = username - cqlshrc['authentication']['password'] = password - cqlshrc.setdefault('connection', {}) - cqlshrc['connection']['hostname'] = hookenv.unit_public_ip() - if get_cassandra_version().startswith('2.0'): - cqlshrc['connection']['port'] = str(config['rpc_port']) - else: - cqlshrc['connection']['port'] = str(config['native_transport_port']) - - ini = io.StringIO() - cqlshrc.write(ini) - host.mkdir(os.path.dirname(cqlshrc_path), perms=0o700) - host.write_file(cqlshrc_path, ini.getvalue().encode('UTF-8'), perms=0o400) - - return username, password - - -def emit(*args, **kw): - # Just like print, but with plumbing and mocked out in the test suite. - print(*args, **kw) - sys.stdout.flush() - - -def nodetool(*cmd, timeout=120): - cmd = ['nodetool'] + [str(i) for i in cmd] - i = 0 - until = time.time() + timeout - for _ in backoff('nodetool to work'): - i += 1 - try: - if timeout is not None: - timeout = max(0, until - time.time()) - raw = subprocess.check_output(cmd, universal_newlines=True, - timeout=timeout, - stderr=subprocess.STDOUT) - - # Work around CASSANDRA-8776. - if 'status' in cmd and 'Error:' in raw: - hookenv.log('Error detected but nodetool returned success.', - WARNING) - raise subprocess.CalledProcessError(99, cmd, raw) - - hookenv.log('{} succeeded'.format(' '.join(cmd)), DEBUG) - out = raw.expandtabs() - emit(out) - return out - - except subprocess.CalledProcessError as x: - if i > 1: - emit(x.output.expandtabs()) # Expand tabs for juju debug-log. 
- if not is_cassandra_running(): - status_set('blocked', - 'Cassandra has unexpectedly shutdown') - raise SystemExit(0) - if time.time() >= until: - raise - - -def num_nodes(): - return len(get_bootstrapped_ips()) - - -def read_cassandra_yaml(): - cassandra_yaml_path = get_cassandra_yaml_file() - with open(cassandra_yaml_path, 'rb') as f: - return yaml.safe_load(f) - - -@logged -def write_cassandra_yaml(cassandra_yaml): - cassandra_yaml_path = get_cassandra_yaml_file() - host.write_file(cassandra_yaml_path, - yaml.safe_dump(cassandra_yaml).encode('UTF-8')) - - -def configure_cassandra_yaml(overrides={}, seeds=None): - cassandra_yaml_path = get_cassandra_yaml_file() - config = hookenv.config() - - maybe_backup(cassandra_yaml_path) # Its comments may be useful. - - cassandra_yaml = read_cassandra_yaml() - - # Most options just copy from config.yaml keys with the same name. - # Using the same name is preferred to match the actual Cassandra - # documentation. - simple_config_keys = ['cluster_name', 'num_tokens', - 'partitioner', 'authorizer', 'authenticator', - 'compaction_throughput_mb_per_sec', - 'stream_throughput_outbound_megabits_per_sec', - 'tombstone_warn_threshold', - 'tombstone_failure_threshold', - 'native_transport_port', 'rpc_port', - 'storage_port', 'ssl_storage_port'] - cassandra_yaml.update((k, config[k]) for k in simple_config_keys) - - seeds = ','.join(seeds or get_seed_ips()) # Don't include whitespace! - hookenv.log('Configuring seeds as {!r}'.format(seeds), DEBUG) - cassandra_yaml['seed_provider'][0]['parameters'][0]['seeds'] = seeds - - cassandra_yaml['listen_address'] = hookenv.unit_private_ip() - cassandra_yaml['rpc_address'] = '0.0.0.0' - if not get_cassandra_version().startswith('2.0'): - cassandra_yaml['broadcast_rpc_address'] = hookenv.unit_public_ip() - - dirs = get_all_database_directories() - cassandra_yaml.update(dirs) - - # GossipingPropertyFileSnitch is the only snitch recommended for - # production. It we allow others, we need to consider how to deal - # with the system_auth keyspace replication settings. - cassandra_yaml['endpoint_snitch'] = 'GossipingPropertyFileSnitch' - - # Per Bug #1523546 and CASSANDRA-9319, Thrift is disabled by default in - # Cassandra 2.2. Ensure it is enabled if rpc_port is non-zero. - if int(config['rpc_port']) > 0: - cassandra_yaml['start_rpc'] = True - - cassandra_yaml.update(overrides) - - write_cassandra_yaml(cassandra_yaml) - - -def get_pid_from_file(pid_file): - try: - with open(pid_file, 'r') as f: - pid = int(f.read().strip().split()[0]) - if pid <= 1: - raise ValueError('Illegal pid {}'.format(pid)) - return pid - except (ValueError, IndexError) as e: - hookenv.log("Invalid PID in {}.".format(pid_file)) - raise ValueError(e) - - -def is_cassandra_running(): - pid_file = get_cassandra_pid_file() - - try: - for _ in backoff('Cassandra to respond'): - # We reload the pid every time, in case it has gone away. - # If it goes away, a FileNotFound exception is raised. - pid = get_pid_from_file(pid_file) - - # This does not kill the process but checks for its - # existence. It raises an ProcessLookupError if the process - # is not running. - os.kill(pid, 0) - - if subprocess.call(["nodetool", "status"], - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) == 0: - hookenv.log( - "Cassandra PID {} is running and responding".format(pid)) - return True - except FileNotFoundError: - hookenv.log("Cassandra is not running. 
PID file does not exist.") - return False - except ProcessLookupError: - if os.path.exists(pid_file): - # File disappeared between reading the PID and checking if - # the PID is running. - hookenv.log("Cassandra is not running, but pid file exists.", - WARNING) - else: - hookenv.log("Cassandra is not running. PID file does not exist.") - return False - - -def get_auth_keyspace_replication(session): - if has_cassandra_version('3.0'): - statement = dedent('''\ - SELECT replication FROM system_schema.keyspaces - WHERE keyspace_name='system_auth' - ''') - r = query(session, statement, ConsistencyLevel.QUORUM) - return dict(r[0][0]) - else: - statement = dedent('''\ - SELECT strategy_options FROM system.schema_keyspaces - WHERE keyspace_name='system_auth' - ''') - r = query(session, statement, ConsistencyLevel.QUORUM) - return json.loads(r[0][0]) - - -@logged -def set_auth_keyspace_replication(session, settings): - # Live operation, so keep status the same. - status_set(hookenv.status_get()[0], - 'Updating system_auth rf to {!r}'.format(settings)) - statement = 'ALTER KEYSPACE system_auth WITH REPLICATION = %s' - query(session, statement, ConsistencyLevel.ALL, (settings,)) - - -@logged -def repair_auth_keyspace(): - # Repair takes a long time, and may need to be retried due to 'snapshot - # creation' errors, but should certainly complete within an hour since - # the keyspace is tiny. - status_set(hookenv.status_get()[0], - 'Repairing system_auth keyspace') - nodetool('repair', 'system_auth', timeout=3600) - - -def is_bootstrapped(unit=None): - '''Return True if the node has already bootstrapped into the cluster.''' - if unit is None or unit == hookenv.local_unit(): - return hookenv.config().get('bootstrapped', False) - elif coordinator.relid: - return bool(hookenv.relation_get(rid=coordinator.relid, - unit=unit).get('bootstrapped')) - else: - return False - - -def set_bootstrapped(): - # We need to store this flag in two locations. The peer relation, - # so peers can see it, and local state, for when we haven't joined - # the peer relation yet. actions.publish_bootstrapped_flag() - # calls this method again when necessary to ensure that state is - # propagated # if/when the peer relation is joined. - config = hookenv.config() - config['bootstrapped'] = True - if coordinator.relid is not None: - hookenv.relation_set(coordinator.relid, bootstrapped="1") - if config.changed('bootstrapped'): - hookenv.log('Bootstrapped') - else: - hookenv.log('Already bootstrapped') - - -def get_bootstrapped(): - units = [hookenv.local_unit()] - if coordinator.relid is not None: - units.extend(hookenv.related_units(coordinator.relid)) - return set([unit for unit in units if is_bootstrapped(unit)]) - - -def get_bootstrapped_ips(): - return set([unit_to_ip(unit) for unit in get_bootstrapped()]) - - -def unit_to_ip(unit): - if unit is None or unit == hookenv.local_unit(): - return hookenv.unit_private_ip() - elif coordinator.relid: - pa = hookenv.relation_get(rid=coordinator.relid, - unit=unit).get('private-address') - return hookenv._ensure_ip(pa) - else: - return None - - -def get_node_status(): - '''Return the Cassandra node status. - - May be NORMAL, JOINING, DECOMMISSIONED etc., or None if we can't tell. 
- ''' - if not is_cassandra_running(): - return None - raw = nodetool('netstats') - m = re.search(r'(?m)^Mode:\s+(\w+)$', raw) - if m is None: - return None - return m.group(1).upper() - - -def is_decommissioned(): - status = get_node_status() - if status in ('DECOMMISSIONED', 'LEAVING'): - hookenv.log('This node is {}'.format(status), WARNING) - return True - return False - - -@logged -def emit_describe_cluster(): - '''Run nodetool describecluster for the logs.''' - nodetool('describecluster') # Implicit emit - - -@logged -def emit_status(): - '''Run 'nodetool status' for the logs.''' - nodetool('status') # Implicit emit - - -@logged -def emit_netstats(): - '''Run 'nodetool netstats' for the logs.''' - nodetool('netstats') # Implicit emit - - -def emit_cluster_info(): - emit_describe_cluster() - emit_status() - emit_netstats() - - -# FOR CHARMHELPERS (and think of a better name) -def week_spread(unit_num): - '''Pick a time for a unit's weekly job. - - Jobs are spread out evenly throughout the week as best we can. - The chosen time only depends on the unit number, and does not change - if other units are added and removed; while the chosen time will not - be perfect, we don't have to worry about skipping a weekly job if - units are added or removed at the wrong moment. - - Returns (dow, hour, minute) suitable for cron. - ''' - def vdc(n, base=2): - '''Van der Corpet sequence. 0, 0.5, 0.25, 0.75, 0.125, 0.625, ... - - http://rosettacode.org/wiki/Van_der_Corput_sequence#Python - ''' - vdc, denom = 0, 1 - while n: - denom *= base - n, remainder = divmod(n, base) - vdc += remainder / denom - return vdc - # We could use the vdc() function to distribute jobs evenly throughout - # the week, so unit 0==0, unit 1==3.5days, unit 2==1.75 etc. But - # plain modulo for the day of week is easier for humans and what - # you expect for 7 units or less. - sched_dow = unit_num % 7 - # We spread time of day so each batch of 7 units gets the same time, - # as far spread out from the other batches of 7 units as possible. - minutes_in_day = 24 * 60 - sched = timedelta(minutes=int(minutes_in_day * vdc(unit_num // 7))) - sched_hour = sched.seconds // (60 * 60) - sched_minute = sched.seconds // 60 - sched_hour * 60 - return (sched_dow, sched_hour, sched_minute) - - -# FOR CHARMHELPERS. This should be a constant in nrpe.py -def local_plugins_dir(): - return '/usr/local/lib/nagios/plugins' - - -def leader_ping(): - '''Make a change in the leader settings, waking the non-leaders.''' - assert hookenv.is_leader() - last = int(hookenv.leader_get('ping') or 0) - hookenv.leader_set(ping=str(last + 1)) - - -def get_unit_superusers(): - '''Return the set of units that have had their superuser accounts created. 
- ''' - raw = hookenv.leader_get('superusers') - return set(json.loads(raw or '[]')) - - -def set_unit_superusers(superusers): - hookenv.leader_set(superusers=json.dumps(sorted(superusers))) - - -def status_set(state, message): - '''Set the unit status and log a message.''' - hookenv.status_set(state, message) - hookenv.log('{} unit state: {}'.format(state, message)) - - -def service_status_set(state, message): - '''Set the service status and log a message.''' - subprocess.check_call(['status-set', '--service', state, message]) - hookenv.log('{} service state: {}'.format(state, message)) - - -def get_service_name(relid): - '''Return the service name for the other end of relid.''' - units = hookenv.related_units(relid) - if units: - return units[0].split('/', 1)[0] - else: - return None - - -def peer_relid(): - return coordinator.relid - - -@logged -def set_active(): - '''Set happy state''' - if hookenv.unit_private_ip() in get_seed_ips(): - msg = 'Live seed' - else: - msg = 'Live node' - status_set('active', msg) - - if hookenv.is_leader(): - n = num_nodes() - if n == 1: - n = 'Single' - service_status_set('active', '{} node cluster'.format(n)) - - -def update_hosts_file(hosts_file, hosts_map): - """Older versions of Cassandra need own hostname resolution.""" - with open(hosts_file, 'r') as hosts: - lines = hosts.readlines() - - newlines = [] - for ip, hostname in hosts_map.items(): - if not ip or not hostname: - continue - - keepers = [] - for line in lines: - _line = line.split() - if len(_line) < 2 or not (_line[0] == ip or hostname in _line[1:]): - keepers.append(line) - else: - hookenv.log('Marking line {!r} for update or removal' - ''.format(line.strip()), level=DEBUG) - - lines = keepers - newlines.append('{} {}\n'.format(ip, hostname)) - - lines += newlines - - with tempfile.NamedTemporaryFile(delete=False) as tmpfile: - with open(tmpfile.name, 'w') as hosts: - for line in lines: - hosts.write(line) - - os.rename(tmpfile.name, hosts_file) - os.chmod(hosts_file, 0o644) diff --git a/charms/trusty/cassandra/hooks/hooks.py b/charms/trusty/cassandra/hooks/hooks.py deleted file mode 100644 index ef38c20..0000000 --- a/charms/trusty/cassandra/hooks/hooks.py +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
-from charmhelpers import fetch -from charmhelpers.core import hookenv - - -def set_proxy(): - import os - config = hookenv.config() - if config['http_proxy']: - os.environ['ftp_proxy'] = config['http_proxy'] - os.environ['http_proxy'] = config['http_proxy'] - os.environ['https_proxy'] = config['http_proxy'] - - -def bootstrap(): - try: - import bcrypt # NOQA: flake8 - import cassandra # NOQA: flake8 - except ImportError: - packages = ['python3-bcrypt', 'python3-cassandra'] - set_proxy() - fetch.configure_sources(update=True) - fetch.apt_install(packages,fatal=True) - import bcrypt # NOQA: flake8 - import cassandra # NOQA: flake8 - - -def default_hook(): - if not hookenv.has_juju_version('1.24'): - hookenv.status_set('blocked', 'Requires Juju 1.24 or higher') - # Error state, since we don't have 1.24 to give a nice blocked state. - raise SystemExit(1) - - # These need to be imported after bootstrap() or required Python - # packages may not have been installed. - import definitions - - # Only useful for debugging, or perhaps have this enabled with a config - # option? - # from loglog import loglog - # loglog('/var/log/cassandra/system.log', prefix='C*: ') - - hookenv.log('*** {} Hook Start'.format(hookenv.hook_name())) - sm = definitions.get_service_manager() - sm.manage() - hookenv.log('*** {} Hook Done'.format(hookenv.hook_name())) diff --git a/charms/trusty/cassandra/hooks/install b/charms/trusty/cassandra/hooks/install deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/install +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/leader-elected b/charms/trusty/cassandra/hooks/leader-elected deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/leader-elected +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
-import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/leader-settings-changed b/charms/trusty/cassandra/hooks/leader-settings-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/leader-settings-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/loglog.py b/charms/trusty/cassandra/hooks/loglog.py deleted file mode 100644 index 33f3af8..0000000 --- a/charms/trusty/cassandra/hooks/loglog.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import atexit -import subprocess -import threading -import time - -from charmhelpers.core import hookenv - - -def loglog(filename, prefix='', level=hookenv.DEBUG): - '''Mirror an arbitrary log file to the Juju hook log in the background.''' - tailproc = subprocess.Popen(['tail', '-F', filename], - stdout=subprocess.PIPE, - universal_newlines=True) - atexit.register(tailproc.terminate) - - def loglog_t(tailproc=tailproc): - while True: - line = tailproc.stdout.readline() - if line: - hookenv.log('{}{}'.format(prefix, line), level) - else: - time.sleep(0.1) - continue - - t = threading.Thread(target=loglog_t, daemon=True) - t.start() diff --git a/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed b/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/nrpe-external-master-relation-changed +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. 
See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/relations.py b/charms/trusty/cassandra/hooks/relations.py deleted file mode 100644 index f7870a1..0000000 --- a/charms/trusty/cassandra/hooks/relations.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - -import os.path - -import yaml - -from charmhelpers.core import hookenv, host -from charmhelpers.core.hookenv import log, WARNING -from charmhelpers.core.services.helpers import RelationContext - -from coordinator import coordinator - - -class PeerRelation(RelationContext): - interface = 'cassandra-cluster' - name = 'cluster' - - def is_ready(self): - # All units except the leader need to wait until the peer - # relation is available. - if coordinator.relid is not None or hookenv.is_leader(): - return True - return False - - -# FOR CHARMHELPERS (if we can integrate Juju 1.24 storage too) -class StorageRelation(RelationContext): - '''Wait for the block storage mount to become available. - - Charms using this should add a 'wait_for_storage_broker' boolean - configuration option in their config.yaml file. This is necessary - to avoid potential data loss race conditions, because otherwise a - unit will be started up using local disk before it becomes aware - that it should be using external storage. - - 'relname' is the relation name. - - 'mountpount' is the mountpoint. Use the default if you have a single - block storage broker relation. The default is calculated to avoid - configs using the unit name (/srv/${service}_${unitnumber}). 
- ''' - interface = 'block-storage' - mountpoint = None - - def __init__(self, name=None, mountpoint=None): - if name is None: - name = self._get_relation_name() - super(StorageRelation, self).__init__(name) - - if mountpoint is None: - mountpoint = os.path.join('/srv/', - hookenv.local_unit().replace('/', '_')) - self._requested_mountpoint = mountpoint - - if len(self.get('data', [])) == 0: - self.mountpoint = None - elif mountpoint == self['data'][0].get('mountpoint', None): - self.mountpoint = mountpoint - else: - self.mountpoint = None - - def _get_relation_name(self): - with open(os.path.join(hookenv.charm_dir(), - 'metadata.yaml'), 'r') as mdf: - md = yaml.safe_load(mdf) - for section in ['requires', 'provides']: - for relname in md.get(section, {}).keys(): - if md[section][relname]['interface'] == 'block-storage': - return relname - raise LookupError('No block-storage relation defined') - - def is_ready(self): - if hookenv.config('wait_for_storage_broker'): - if self.mountpoint: - log("External storage mounted at {}".format(self.mountpoint)) - return True - else: - log("Waiting for block storage broker to mount {}".format( - self._requested_mountpoint), WARNING) - return False - return True - - def provide_data(self, remote_service, service_ready): - hookenv.log('Requesting mountpoint {} from {}' - .format(self._requested_mountpoint, remote_service)) - return dict(mountpoint=self._requested_mountpoint) - - def needs_remount(self): - config = hookenv.config() - return config.get('live_mountpoint') != self.mountpoint - - def migrate(self, src_dir, subdir): - assert self.needs_remount() - assert subdir, 'Can only migrate to a subdirectory on a mount' - - config = hookenv.config() - config['live_mountpoint'] = self.mountpoint - - if self.mountpoint is None: - hookenv.log('External storage AND DATA gone.' - 'Reverting to original local storage', WARNING) - return - - dst_dir = os.path.join(self.mountpoint, subdir) - if os.path.exists(dst_dir): - hookenv.log('{} already exists. Not migrating data.'.format( - dst_dir)) - return - - # We are migrating the contents of src_dir, so we want a - # trailing slash to ensure rsync's behavior. - if not src_dir.endswith('/'): - src_dir += '/' - - # We don't migrate data directly into the new destination, - # which allows us to detect a failed migration and recover. - tmp_dst_dir = dst_dir + '.migrating' - hookenv.log('Migrating data from {} to {}'.format( - src_dir, tmp_dst_dir)) - host.rsync(src_dir, tmp_dst_dir, flags='-av') - - hookenv.log('Moving {} to {}'.format(tmp_dst_dir, dst_dir)) - os.rename(tmp_dst_dir, dst_dir) - - assert not self.needs_remount() diff --git a/charms/trusty/cassandra/hooks/stop b/charms/trusty/cassandra/hooks/stop deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/stop +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. 
If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() diff --git a/charms/trusty/cassandra/hooks/upgrade-charm b/charms/trusty/cassandra/hooks/upgrade-charm deleted file mode 100755 index 9128cab..0000000 --- a/charms/trusty/cassandra/hooks/upgrade-charm +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/python3 -# Copyright 2015 Canonical Ltd. -# -# This file is part of the Cassandra Charm for Juju. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranties of -# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR -# PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. -import hooks -if __name__ == '__main__': - hooks.bootstrap() - hooks.default_hook() |
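For reference, the week_spread() helper in the deleted helpers.py above staggers each unit's weekly maintenance cron job across the week using a Van der Corput sequence, so adding or removing units never shifts an existing unit's slot. The following is a minimal standalone sketch of just that arithmetic, condensed from the removed code for illustration (it is not part of the charm itself); the expected values in the comments are worked out by hand from the same formula.

    from datetime import timedelta

    def vdc(n, base=2):
        # Van der Corput sequence: 0, 0.5, 0.25, 0.75, 0.125, 0.625, ...
        value, denom = 0, 1
        while n:
            denom *= base
            n, remainder = divmod(n, base)
            value += remainder / denom
        return value

    def week_spread(unit_num):
        # Day of week simply cycles 0-6. Time of day is shared by each
        # batch of seven units and spread between batches via vdc(), so
        # the slot depends only on the unit number.
        sched_dow = unit_num % 7
        sched = timedelta(minutes=int(24 * 60 * vdc(unit_num // 7)))
        sched_hour = sched.seconds // 3600
        sched_minute = sched.seconds // 60 - sched_hour * 60
        return (sched_dow, sched_hour, sched_minute)

    # Units 0-6 run on days 0-6 at 00:00, units 7-13 at 12:00,
    # units 14-20 at 06:00, units 21-27 at 18:00, unit 28 at 03:00, ...
    for unit in (0, 7, 14, 21, 28):
        print(unit, week_spread(unit))
        # -> (0, 0, 0), (0, 12, 0), (0, 6, 0), (0, 18, 0), (0, 3, 0)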