#!/usr/bin/env python
#
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""binary to deploy a cluster by compass client api."""
import os
import re
import socket
import sys
import time
import yaml
import netaddr
import requests
import json
import itertools
import threading

from collections import defaultdict
from restful import Client

ROLE_UNASSIGNED = True
ROLE_ASSIGNED = False

import log as logging
LOG = logging.getLogger(__name__)

from oslo_config import cfg
CONF = cfg.CONF


def byteify(input):
    """recursively convert unicode dict keys/values and list items to utf-8."""
    if isinstance(input, dict):
        return dict([(byteify(key), byteify(value))
                     for key, value in input.iteritems()])
    elif isinstance(input, list):
        return [byteify(element) for element in input]
    elif isinstance(input, unicode):
        return input.encode('utf-8')
    else:
        return input


opts = [
    cfg.StrOpt('expansion', help='is this an expansion?', default='false'),
    cfg.StrOpt('compass_server', help='compass server url',
               default='http://127.0.0.1/api'),
    cfg.StrOpt('compass_user_email', help='compass user email',
               default='admin@huawei.com'),
    cfg.StrOpt('compass_user_password', help='compass user password',
               default='admin'),
    cfg.StrOpt('switch_ips', help='comma separated switch ips', default=''),
    cfg.StrOpt('switch_credential', help='comma separated =',
               default='version=2c,community=public'),
    cfg.IntOpt('switch_max_retries', help='max retries of poll switch',
               default=10),
    cfg.IntOpt('switch_retry_interval', help='interval to repoll switch',
               default=10),
    cfg.BoolOpt('poll_switches', help='if the client polls switches',
                default=True),
    cfg.StrOpt('machines', help='comma separated mac addresses of machines',
               default=''),
    cfg.StrOpt('subnets', help='comma separated subnets', default=''),
    cfg.StrOpt('adapter_name', help='adapter name', default=''),
    cfg.StrOpt('adapter_os_pattern', help='adapter os name',
               default=r'^(?i)centos.*'),
    cfg.StrOpt('adapter_target_system_pattern',
               help='adapter target system name', default='^openstack$'),
    cfg.StrOpt('adapter_flavor_pattern', help='adapter flavor name',
               default='allinone'),
    cfg.StrOpt('cluster_name', help='cluster name', default='cluster1'),
    cfg.StrOpt('language', help='language', default='EN'),
    cfg.StrOpt('timezone', help='timezone', default='GMT'),
    cfg.StrOpt('http_proxy', help='http proxy', default=''),
    cfg.StrOpt('https_proxy', help='https proxy', default=''),
    cfg.StrOpt('no_proxy', help='no proxy', default=''),
    cfg.StrOpt('ntp_server', help='ntp server', default=''),
    cfg.StrOpt('dns_servers', help='dns servers', default=''),
    cfg.StrOpt('domain', help='domain', default=''),
    cfg.StrOpt('search_path', help='search path', default=''),
    cfg.StrOpt('local_repo_url', help='local repo url', default=''),
    cfg.StrOpt('default_gateway', help='default gateway', default=''),
    cfg.StrOpt('server_credential',
               help='server credential formatted as =',
               default='root=root'),
    cfg.StrOpt('os_config_json_file', help='json formatted os config file',
               default=''),
    cfg.StrOpt('service_credentials',
               help='comma separated service credentials formatted as :=,...',
               default=''),
    cfg.StrOpt('console_credentials',
               help='comma separated console credential formatted as :=',
               default=''),
    cfg.StrOpt('hostnames', help='comma separated hostname', default=''),
    cfg.StrOpt('host_networks',
               help='semicolon separated host name and its networks :=||,...',
               default=''),
    cfg.StrOpt('partitions', help='comma separated partitions =',
               default='tmp:percentage=10%,var:percentage=30%,home:percentage=30%'),
    cfg.StrOpt('network_mapping', help='comma separated network mapping =',
               default=''),
    cfg.StrOpt('package_config_json_file',
               help='json formatted os config file', default=''),
    cfg
##############################################################################
# Copyright (c) 2017 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
# Sample benchmark task config file
# measure storage performance using fio
#
# This sample is equivalent to running the command below on the test VM and
# reporting the benchmark results back to yardstick.
#
# sudo fio -filename=/home/ubuntu/data.raw -bs=4k -iodepth=1 -rw=rw \
#          -ramp_time=10 -runtime=60 -name=yardstick-fio -ioengine=libaio \
#          -direct=1 -group_reporting -numjobs=1 -time_based \
#          --output-format=json
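#
# A usage sketch (not part of the task definition; the file path is an
# assumption): with the yardstick CLI installed and this file saved as,
# e.g., samples/fio_volume.yaml, the task can be started with:
#
#   yardstick task start samples/fio_volume.yaml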

schema: "yardstick:task:0.1"

{% set rw = rw or "randrw" %}
{% set bs = bs or "8k" %}
{% set size = size or "100g" %}
{% set rwmixwrite = rwmixwrite or "50" %}
{% set numjobs = numjobs or "1" %}
{% set direct = direct or "1" %}
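
# The "set" statements above only provide defaults; each variable can be
# overridden when the task is started. A hedged example using the CLI's
# --task-args option (the JSON keys match the template variables above,
# and the file path is an assumption):
#
#   yardstick task start samples/fio_volume.yaml \
#     --task-args '{"rw": "randread", "bs": "64k", "numjobs": "4"}'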

scenarios:
-
  type: Fio
  options:
    filename: /dev/vdb
    bs: {{bs}}
    rw: {{rw}}
    size: {{size}}
    rwmixwrite: {{rwmixwrite}}
    numjobs: {{numjobs}}
    direct: {{direct}}
    ramp_time: 10

  host: fio.fio_volume

  runner:
    type: Duration
    duration: 60
    interval: 1

  sla:
    read_bw: 6000
    read_iops: 1500
    read_lat: 500.1
    write_bw: 6000
    write_iops: 1500
    write_lat: 500.1
    action: monitor

context:
  name: fio_volume
  image: yardstick-image
  flavor: yardstick-flavor
  user: ubuntu
  servers:
    fio:
      volume:
        name: fio-volume
        size: 200
      volume_mountpoint: "/dev/vdb"
      floating_ip: true
  networks:
    test:
      cidr: "10.0.1.0/24"
      port_security_enabled: true
        console_credentials = [
            console_credential
            for console_credential in CONF.console_credentials.split(',')
            if console_credential
        ]

        LOG.info(
            'console credentials: %s', console_credentials
        )

        console_credential_cfg = {}
        for console_credential in console_credentials:
            if ':' not in console_credential:
                raise Exception(
                    'there is no : in console credential %s'
                    % console_credential
                )
            console_name, console_pair = console_credential.split(':', 1)
            if '=' not in console_pair:
                raise Exception(
                    'there is no = in console %s security' % console_name
                )
            username, password = console_pair.split('=', 1)
            console_credential_cfg[console_name] = {
                'username': username,
                'password': password
            }

        package_config["security"] = {
            "service_credentials": service_credential_cfg,
            "console_credentials": console_credential_cfg
        }

        network_mapping = dict([
            network_pair.split('=', 1)
            for network_pair in CONF.network_mapping.split(',')
            if '=' in network_pair
        ])
        package_config['network_mapping'] = network_mapping

        assert(os.path.exists(CONF.network_cfg))
        network_cfg = yaml.load(open(CONF.network_cfg))
        package_config["network_cfg"] = network_cfg

        assert(os.path.exists(CONF.neutron_cfg))
        neutron_cfg = yaml.load(open(CONF.neutron_cfg))
        package_config["neutron_config"] = neutron_cfg

        """
        package_config_filename = CONF.package_config_json_file
        if package_config_filename:
            util.merge_dict(
                package_config, _load_config(package_config_filename)
            )
        """

        package_config['ha_proxy'] = {}
        if CONF.cluster_vip:
            package_config["ha_proxy"]["vip"] = CONF.cluster_vip

        package_config['enable_secgroup'] = (CONF.enable_secgroup == "true")
        package_config['enable_fwaas'] = (CONF.enable_fwaas == "true")
        package_config['enable_vpnaas'] = (CONF.enable_vpnaas == "true")
        package_config['odl_l3_agent'] = \
            "Enable" if CONF.odl_l3_agent == "Enable" else "Disable"
        package_config['moon'] = \
            "Enable" if CONF.moon == "Enable" else "Disable"
        package_config['onos_sfc'] = \
            "Enable" if CONF.onos_sfc == "Enable" else "Disable"

        status, resp = self.client.update_cluster_config(
            cluster_id, package_config=package_config)
        LOG.info(
            'set package config %s to cluster %s status: %s, resp: %s',
            package_config, cluster_id, status, resp)
        if not self.is_ok(status):
            raise RuntimeError("set cluster package_config failed")

    def set_host_roles(self, cluster_id, host_id, roles):
        status, response = self.client.update_cluster_host(
            cluster_id, host_id, roles=roles)
        LOG.info(
            'set cluster %s host %s roles %s status %s: %s',
            cluster_id, host_id, roles, status, response
        )
        if not self.is_ok(status):
            raise RuntimeError("set host roles failed")

        for role in roles:
            if role in self.role_mapping:
                self.role_mapping[role] = ROLE_ASSIGNED

    def set_all_hosts_roles(self, cluster_id):
        # assign explicitly configured roles first
        for host_str in CONF.host_roles.split(';'):
            host_str = host_str.strip()
            hostname, roles_str = host_str.split('=', 1)

            assert(hostname in self.host_mapping)

            host_id = self.host_mapping[hostname]
            roles = [role.strip() for role in roles_str.split(',') if role]
            self.set_host_roles(cluster_id, host_id, roles)
            self.host_roles[hostname] = roles

        # spread any still-unassigned roles over hosts without roles
        unassigned_hostnames = list(set(self.host_mapping.keys())
                                    - set(self.host_roles.keys()))
        unassigned_roles = [
            role for role, status in self.role_mapping.items()
            if is_role_unassigned(status)]

        assert(len(unassigned_hostnames) >= len(unassigned_roles))

        for hostname, role in map(None, unassigned_hostnames,
                                  unassigned_roles):
            host_id = self.host_mapping[hostname]
            self.set_host_roles(cluster_id, host_id, [role])
            self.host_roles[hostname] = [role]

        unassigned_hostnames = list(set(self.host_mapping.keys())
                                    - set(self.host_roles.keys()))

        if not unassigned_hostnames:
            return

        # assign default roles to unassigned hosts
        default_roles = [
            role for role in CONF.default_roles.split(',') if role
        ]

        assert(default_roles)

        cycle_roles = itertools.cycle(default_roles)
        for hostname in unassigned_hostnames:
            host_id = self.host_mapping[hostname]
            roles = [cycle_roles.next()]
            self.set_host_roles(cluster_id, host_id, roles)
            self.host_roles[hostname] = roles

    def deploy_clusters(self, cluster_id):
        host_ids = self.host_mapping.values()
        status, response = self.client.review_cluster(
            cluster_id, review={'hosts': host_ids}
        )
        LOG.info(
            'review cluster %s hosts %s, status %s: %s',
            cluster_id, host_ids, status, response
        )
        # TODO: what is this doing?
        if not self.is_ok(status):
            raise RuntimeError("review cluster host failed")

        status, response = self.client.deploy_cluster(
            cluster_id, deploy={'hosts': host_ids}
        )
        LOG.info(
            'deploy cluster %s hosts %s status %s: %s',
            cluster_id, host_ids, status, response
        )
        if not self.is_ok(status):
            raise RuntimeError("deploy cluster failed")

    def redeploy_clusters(self, cluster_id):
        status, response = self.client.redeploy_cluster(
            cluster_id
        )
        if not self.is_ok(status):
            LOG.info(
                'redeploy cluster %s status %s: %s',
                cluster_id, status, response
            )
            raise RuntimeError("redeploy cluster failed")

    def get_cluster_state(self, cluster_id):
        for _ in range(10):
            try:
                status, cluster_state = \
                    self.client.get_cluster_state(cluster_id)
                if self.is_ok(status):
                    break
            except:
                status = 500
                cluster_state = ""
                LOG.error("can not get cluster %s's state, try again"
                          % cluster_id)
                time.sleep(6)

        return status, cluster_state

    def get_installing_progress(self, cluster_id):
        def _get_installing_progress():
            """get installing progress."""
            deployment_timeout = \
                time.time() + 60 * float(CONF.deployment_timeout)
            current_time = time.time
            while current_time() < deployment_timeout:
                status, cluster_state = self.get_cluster_state(cluster_id)
                if not self.is_ok(status):
                    raise RuntimeError("can not get cluster state")
                elif cluster_state['state'] == 'SUCCESSFUL':
                    LOG.info(
                        'get cluster %s state status %s: %s, successful',
                        cluster_id, status, cluster_state
                    )
                    break
                elif cluster_state['state'] == 'ERROR':
                    raise RuntimeError(
                        'get cluster %s state status %s: %s, error'
                        % (cluster_id, status, cluster_state)
                    )
                time.sleep(5)

            if current_time() >= deployment_timeout:
                LOG.info("current_time=%s, deployment_timeout=%s"
                         % (current_time(), deployment_timeout))
                LOG.info("cobbler status:")
                os.system(
                    "ssh -o StrictHostKeyChecking=no "
                    "-o UserKnownHostsFile=/dev/null "
                    "-i %s root@192.168.200.2 "
                    "'cobbler status'" % (CONF.rsa_file))
                raise RuntimeError("installation timeout")

        try:
            _get_installing_progress()
        finally:
            # do this twice to make sure the log-printing process is killed
            kill_print_proc()
            kill_print_proc()

    def check_dashboard_links(self, cluster_id):
        dashboard_url = CONF.dashboard_url
        if not dashboard_url:
            LOG.info('no dashboard url set')
            return
        dashboard_link_pattern = re.compile(
            CONF.dashboard_link_pattern)
        r = requests.get(dashboard_url, verify=False)
        r.raise_for_status()
        match = dashboard_link_pattern.search(r.text)
        if match:
            LOG.info(
                'dashboard login page for cluster %s can be downloaded',
                cluster_id)
        else:
            msg = (
                '%s failed to be downloaded\n'
                'the context is:\n%s\n'
            ) % (dashboard_url, r.text)
            raise Exception(msg)


def print_ansible_log():
    # follow the ansible log on the compass VM until it appears
    os.system(
        "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
        "-i %s root@192.168.200.2 "
        "'while ! tail -f /var/ansible/run/%s-%s/ansible.log 2>/dev/null; "
        "do :; sleep 1; done'"
        % (CONF.rsa_file, CONF.adapter_name, CONF.cluster_name))


def kill_print_proc():
    os.system(
        "ps aux|grep -v grep|grep -E 'ssh.+root@192.168.200.2'"
        "|awk '{print $2}'|xargs kill -9")


def deploy():
    if CONF.expansion == "false":
        client = CompassClient()
        machines = client.get_machines()

        LOG.info('machines are %s', machines)

        client.add_subnets()
        adapter_id, os_id, flavor_id = client.get_adapter()
        cluster_id = client.add_cluster(adapter_id, os_id, flavor_id)

        client.add_cluster_hosts(cluster_id, machines)
        client.set_host_networking()
        client.set_cluster_os_config(cluster_id)

        if flavor_id:
            client.set_cluster_package_config(cluster_id)

        client.set_all_hosts_roles(cluster_id)
        client.deploy_clusters(cluster_id)

        LOG.info("compass OS installation is starting")
        threading.Thread(target=print_ansible_log).start()

        client.get_installing_progress(cluster_id)
        client.check_dashboard_links(cluster_id)
    else:
        # expansion: reuse the existing cluster and only add new hosts
        client = CompassClient()
        machines = client.get_machines()

        LOG.info('machines are %s', machines)

        client.add_subnets()

        status, response = client.client.list_clusters()
        cluster_id = 1
        for cluster in response:
            if cluster['name'] == CONF.cluster_name:
                cluster_id = cluster['id']

        client.add_cluster_hosts(cluster_id, machines)
        client.set_host_networking()
        client.set_cluster_os_config(cluster_id)
        client.set_cluster_package_config(cluster_id)
        client.set_all_hosts_roles(cluster_id)
        client.deploy_clusters(cluster_id)

        threading.Thread(target=print_ansible_log).start()
        client.get_installing_progress(cluster_id)


def redeploy():
    client = CompassClient()
    cluster_id = client.list_clusters()

    client.redeploy_clusters(cluster_id)
    client.get_installing_progress(cluster_id)
    client.check_dashboard_links(cluster_id)


def main():
    if CONF.deploy_flag == "redeploy":
        redeploy()
    else:
        deploy()


if __name__ == "__main__":
    CONF(args=sys.argv[1:])
    main()
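
# A hedged invocation sketch (not from the original file): the opts defined
# above are presumably registered as oslo.config CLI options in a part of the
# script not shown here, so the deploy wrapper would pass them as flags, e.g.:
#
#   python client.py --compass_server=http://127.0.0.1/api \
#       --compass_user_email=admin@huawei.com --compass_user_password=admin \
#       --cluster_name=cluster1 --adapter_name=<adapter> \
#       --machines=<mac1>,<mac2> --hostnames=host1,host2
#
# The placeholder values in angle brackets are illustrative only.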