From 8b1cb4abd504e6286a691575c31c6a69aa6f8e50 Mon Sep 17 00:00:00 2001 From: Leo Wang Date: Sun, 9 Oct 2016 21:53:00 -0400 Subject: Add documentation for dovetail tool JIRA:DOVETAIL-28 Change-Id: I2478655d951a65a11f39e9ceba10969e56274cc3 Signed-off-by: Leo Wang --- .../installationprocedure/feature.configuation.rst | 45 +++++++ docs/installationprocedure/index.rst | 14 ++ .../installation.instruction.rst | 142 +++++++++++++++++++++ docs/userguide/01-introduction.rst | 4 +- docs/userguide/04-dovetail_config_template.rst | 134 +++++++++++++++++++ docs/userguide/index.rst | 1 + 6 files changed, 338 insertions(+), 2 deletions(-) create mode 100644 docs/installationprocedure/feature.configuation.rst create mode 100644 docs/installationprocedure/index.rst create mode 100644 docs/installationprocedure/installation.instruction.rst create mode 100644 docs/userguide/04-dovetail_config_template.rst diff --git a/docs/installationprocedure/feature.configuation.rst b/docs/installationprocedure/feature.configuation.rst new file mode 100644 index 00000000..0bc4ae33 --- /dev/null +++ b/docs/installationprocedure/feature.configuation.rst @@ -0,0 +1,45 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. + +Testcase Template Syntax +========================= + +The testcases certification are defined in ``dovetail/dovetail/testcase``. +Take testcase ``ipv6.tc001.yml`` as an example. 
+ +Basic template syntax +--------------------- + +:: + + dovetail.ipv6.tc001: + name: dovetail.ipv6.tc001 + objective: VIM ipv6 operations, to create/delete network, port and subnet in bulk operation + scripts: + type: functest + testcase: tempest_smoke_serial + sub_testcase_list: + - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network + - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port + - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet + +The testcase needs at least three keys named as 'name', 'objective' and 'scripts'. The whole +dovetail project can just recognize testcases by the 'name' section, which here is +'dovetail.ipv6.tc001'. The 'objective' describes what exactly this testcase does. In the +'scripts' part, there are some subsections such as 'type', 'testcase' and 'sub_testcase_list'. +Dovetail now supports two kinds of types, functest and yardstick. If you define the type as +functest, then you need to give the functest testcase that you want to use. If the type is +yardstick, then a yardstick testcase is needed. The 'sub_testcase_list' lists the sub_testcases +that you put a high value on. Even though the whole testcase faild, we still think it passed +when all the sub_testcases pass. The sub_testcase_list is just available for functest. When +for yardstick testcase, retain 'sub_testcase_list' section and keep the content empty. + +:: + + sub_testcase_list: + - + +This is the definition of the dovetail testcases. They can just be tested by adding into +scenarios such as ``cert/basic.yml``. diff --git a/docs/installationprocedure/index.rst b/docs/installationprocedure/index.rst new file mode 100644 index 00000000..9a755800 --- /dev/null +++ b/docs/installationprocedure/index.rst @@ -0,0 +1,14 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. 
(c) OPNFV, Huawei Technologies Co.,Ltd and others. + +================== +Dovetail Overview +================== + +.. toctree:: + :maxdepth: 2 + + installation.instruction + feature.configuation diff --git a/docs/installationprocedure/installation.instruction.rst b/docs/installationprocedure/installation.instruction.rst new file mode 100644 index 00000000..aa1b436a --- /dev/null +++ b/docs/installationprocedure/installation.instruction.rst @@ -0,0 +1,142 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. + +===================== +Dovetail Installation +===================== + +Abstract +======== + +Dovetail currently supports installation on Ubuntu 14.04 or by using a docker image. +Detailed steps about installing dovetail can be found below. + +To use dovetail you should have access to an OpenStack environment, +with at least Nova, Neutron, Glance, Keystone and Heat installed. + + +Run dovetail on local machine +============================= + +The steps needed to run dovetail on Ubuntu are: + +1. Download source code and prepare environment. +2. Create certification configuration file. +3. Run certification. + + +Download dovetail and prepare environment +----------------------------------------- + +The source code of dovetail can be got from Gerrit: + +:: + + git clone https://gerrit.opnfv.org/gerrit/dovetail + +After that you need to prepare the environment which means install some tools, +modules and anything else dovetail needs. A file named ``prepare_env.py`` can +do all of these for you. + +:: + + cd dovetail/dovetail/ + python prepare_env.py + +Now the environment should be prepared for running the certification. + +Besides, you may want to install it in a python virtualenv, that is beyond the scope +of this installation instruction. 
+ +Create certification configuration file +--------------------------------------- + +You can wirte a certification configuration file with the format of yaml, +which can be defined to meet your own requirements. The yaml file should +be located in the folder ``dovetail/dovetail/cert``. There provide a sample +file named ``basic.yml``. + +:: + + certification_basic: + name: certification_basic + testcase_list: + - dovetail.ipv6.tc001 + +As you can see, the configuration file combines the testcases you want to run +together, and all of them should already been defined in ``dovetail/dovetail/testcase``. +The name of the configuration file should be certification_%s, where %s (here is basic) +can be used to run this certification. + + +Run the certification +--------------------- + +When you get the dovetail source code, prepare the environment and define the +certification configuration file, you can then run the certification. Take +certification_basic as the example. You can run it using the file ``run.py`` with +basic (scenario name) as its argument. + +:: + + python run.py --scenario basic + +If you did not give an argument, it will be default set as basic. + + +Run dovetail on Docker container +================================ + +steps: +1. pull image +2. run container +3. config +4. run testcase + +pull image +---------- + +Get the latest docker image from docker hub. + +:: + sudo docker pull opnfv/dovetail:latest + +run container +------------- + +Start a container from the image that you just pulled. + +:: + sudo docker run -it --privileged=true -v /home/opnfv/dovetail/results:/home/opnfv/dovetail/results + -v /var/run/docker.sock:/var/run/docker.sock opnfv/dovetail:latest "/bin/bash" + +config +------ + +For now, we need only change value of ``INSTALLER_TYPE`` and ``INSTALLER_IP`` +of the variable ``envs`` to your own environment. 
+ +:: + sudo docker exec -it $(CONTAINER_ID) "/bin/bash" + vi /home/opnfv/dovetail/dovetail/conf/functest_config.yml + vi /home/opnfv/dovetail/dovetail/conf/yardstick_config.yml + +run testcase +------------ + +In docker, you just run following cmds to execute the testcase. + +:: + cd /home/opnfv/dovetail/dovetail + python run.py + + + + +Output +====== + +The running log is stored in ``/home/opnfv/dovetail/results/dovetail.log``. +The report of certification is stored in ``/home/opnfv/dovetail/results/dovetail_report.txt``. diff --git a/docs/userguide/01-introduction.rst b/docs/userguide/01-introduction.rst index f3d57be8..dbe33d5d 100644 --- a/docs/userguide/01-introduction.rst +++ b/docs/userguide/01-introduction.rst @@ -2,7 +2,7 @@ Dovetail Introduction ====================== -**Welcome to Dovetail' documentation !** +**Welcome to Dovetail's documentation !** .. _Dovetail: https://wiki.opnfv.org/dovetail @@ -11,7 +11,7 @@ Dovetail_ is an OPNFV Project. This project intends to define and provide a set of OPNFV related validation criteria that will provide input for the evaluation of the use of OPNFV trademarks. The dovetail project will be executed with the guidance and oversight of the Compliance -and Certification committee and work to secure the goals of the C&C committee for each release +and Certification committee and work to secure the goals of the C&C committee for each release. This project intends to incrementally define qualification criteria that establish the foundations of how we are able to measure the ability diff --git a/docs/userguide/04-dovetail_config_template.rst b/docs/userguide/04-dovetail_config_template.rst new file mode 100644 index 00000000..fce99d21 --- /dev/null +++ b/docs/userguide/04-dovetail_config_template.rst @@ -0,0 +1,134 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. 
+ +Config Template Syntax +====================== + +Dovetail uses Functest/Yardstick Docker container to run its testcases. So you need to give +configurations for building the container and the commands it needs to do. In dovetail, +all of these are defined in config yaml files ``dovetail/dovetail/conf/functest_config.yml`` +and ``dovetail/dovetail/conf/yardstick_config.yml``. + +Functest template syntax +------------------------ + +For example, you can define your ``functest_config.yml`` as: + +:: + + functest: + image_name: opnfv/functest + docker_tag: latest + envs: '-e INSTALLER_TYPE=compass -e INSTALLER_IP=192.168.200.2 + -e NODE_NAME=dovetail-pod -e DEPLOY_SCENARIO=ha_nosdn + -e BUILD_TAG=dovetail -e CI_DEBUG=true -e DEPLOY_TYPE=baremetal' + opts: '-id --privileged=true' + pre_condition: + cmds: + - 'echo test for precondition' + testcase: + cmds: + - 'python /home/opnfv/repos/functest/ci/prepare_env.py start' + - 'python /home/opnfv/repos/functest/ci/run_tests.py -t {{script_testcase}} -r' + post_condition: + cmds: + - '' + result: + dir: '/home/opnfv/functest/results' + store_type: 'file' + file_path: 'tempest/tempest.log' + db_url: 'http://testresults.opnfv.org/test/api/v1/results?case=%s&last=1' + +First, you need to give the image that you want to use for building functest/yardstick container. +Besides, there also need some envirnment parameters such as ``INSTALLER_TYPE`` and ``INSTALLER_IP`` +and the options for you container. Then the functest/yardstick container can be build with your +settings. + +Second, there need three kinds of commands, ``pre_condition``, ``testcase`` and ``post_condition``. +If you want to do some cleanups or preparations, the commands can be put into ``pre_condition`` +section orderly. All commands in this section will just be executed once in the begining. +The ``testcase`` section does the main jobs of the testing. All functest testcases will use the +container to execute these commands one by one. 
After finishing that, the test is accomplished +and the results are stored in files or uploaded to database. The ``post_condition`` section +does some work such as clean Docker images or something else after all testcases finished. +All commands in this section will just execute once. + +Besides, there need a ``result`` section and it gives the directory of the functest/yardstick +results. The ``store_type`` should be the same with the cmds in ``testcase``. That means if the +test results are stored in files, then store_type need to be file and the file_path is also +needed. If the test results are uploaded to database, then a db_url is needed for acquiring the results. + +Yardstick template syntax +------------------------- + +The framework of ``yardstick_config.yml`` is almost the same as ``functest_config.yml``. + +:: + + yardstick: + image_name: opnfv/yardstick + docker_tag: latest + envs: '-e INSTALLER_TYPE=compass -e INSTALLER_IP=192.168.200.2 + -e NODE_NAME=dovetail-pod -e DEPLOY_SCENARIO=ha_nosdn + -e BUILD_TAG=dovetail -e CI_DEBUG=true -e DEPLOY_TYPE=baremetal + -e EXTERNAL_NETWORK=ext-net' + opts: '-id --privileged=true' + pre_condition: + cmds: + - 'source /home/opnfv/repos/yardstick/tests/ci/prepare_env.sh && + source /home/opnfv/repos/yardstick/tests/ci/clean_images.sh && cleanup' + - 'source /home/opnfv/repos/yardstick/tests/ci/prepare_env.sh && + cd /home/opnfv/repos/yardstick && source tests/ci/load_images.sh' + testcase: + cmds: + - 'mkdir -p /home/opnfv/yardstick/results/' + - 'cd /home/opnfv/repos/yardstick && source tests/ci/prepare_env.sh && + yardstick task start tests/opnfv/test_cases/{{script_testcase}}.yaml + --output-file /home/opnfv/yardstick/results/{{script_testcase}}.out &> + /home/opnfv/yardstick/results/yardstick.log' + post_condition: + cmds: + - '' + result: + dir: '/home/opnfv/yardstick/results' + store_type: 'file' + file_path: 'yardstick.log' + db_url: 'http://testresults.opnfv.org/test/api/v1/results?case=%s&last=1' + +The main 
differences between ``yardstick_config.yml`` and ``functest_config.yml`` are the commands. + +Jinja2 template syntax +---------------------- + +Note that you can use jinja2 template for your parameters such as ``{{script_testcase}}``. The +parameters are defined in ``dovetail/dovetail/conf/dovetail_config.yml``: + +:: + + parameters: + - name: testcase + path: '("name",)' + - name: script_testcase + path: '("scripts", "testcase")' + +Here ``path`` is the path in testcase config files that you can find the value of parameters. Take +``script_testcase`` as the example. For testcase dovetail.ipv6.tc001: + +:: + + dovetail.ipv6.tc001: + name: dovetail.ipv6.tc001 + objective: VIM ipv6 operations, to create/delete network, port and subnet in bulk operation + scripts: + type: functest + testcase: tempest_smoke_serial + sub_testcase_list: + - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network + - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port + - tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet + +The path ("scripts", "testcase") means 'testcase' subsection of 'scripts' section. So follow +the path ("scripts", "testcase") we can get the value of ``{{script_testcase}}`` that is +'tempest_smoke_serial'. 
diff --git a/docs/userguide/index.rst b/docs/userguide/index.rst index 03246e7b..0dcf55ce 100644 --- a/docs/userguide/index.rst +++ b/docs/userguide/index.rst @@ -14,3 +14,4 @@ Dovetail - user guide ./01-introduction.rst ./02-certification_criteria.rst ./03-certification_progress.rst + ./04-dovetail_config_template.rst -- cgit n357'>357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 
773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879
# -*- coding: utf-8 -*-
#
# Copyright 2017 OPNFV
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Classes used by collectd.py"""

import time
import os.path
import os
import re
import yaml

from opnfv.deployment import factory
import paramiko
from functest.utils import constants

ID_RSA_PATH = '/root/.ssh/id_rsa'
SSH_KEYS_SCRIPT = '/home/opnfv/barometer/baro_utils/get_ssh_keys.sh'
DEF_PLUGIN_INTERVAL = 10
COLLECTD_CONF = '/etc/collectd.conf'
COLLECTD_CONF_DIR = '/etc/collectd/collectd.conf.d'
NOTIFICATION_FILE = '/var/log/python-notifications.dump'
COLLECTD_NOTIFICATION = '/etc/collectd_notification_dump.py'
APEX_IP = os.getenv("INSTALLER_IP").rstrip('\n')
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
TEST_VM_IMAGE = 'cirros-0.4.0-x86_64-disk.img'
TEST_VM_IMAGE_PATH = '/home/opnfv/functest/images/' + TEST_VM_IMAGE


class Node(object):
    """Node configuration parsed from one row of `nova list` output.

    `attrs` is the list of cell values obtained by splitting one table
    row on '|' (see ConfigServer.__init__ for the parsing).
    """
    def __init__(self, attrs):
        self.__null = attrs[0]
        self.__id = attrs[1]
        self.__name = attrs[2]
        self.__status = attrs[3] if attrs[3] else None
        self.__taskState = attrs[4]
        self.__pwrState = attrs[5]
        # Networks column looks like 'ctlplane=192.0.2.8'; keep the IP only.
        self.__ip = re.sub('^[a-z]+=', '', attrs[6])
        # BUGFIX: __roles was never initialised, so get_roles() always
        # raised AttributeError.  The role is not present in the listing,
        # so default to an empty list.
        self.__roles = []

    def get_name(self):
        """Get node name"""
        return self.__name

    def get_id(self):
        """Get node ID"""
        return self.__id

    def get_ip(self):
        """Get node IP address"""
        return self.__ip

    def get_roles(self):
        """Get node roles (empty when not provided by the source listing)."""
        return self.__roles


def get_apex_nodes():
    """Return the list of overcloud nodes known to the Apex installer."""
    apex_handler = factory.Factory.get_handler(
        'apex', APEX_IP, APEX_USER_STACK, APEX_PKEY)
    return apex_handler.get_nodes()


class ConfigServer(object):
    """Class to get env configuration"""
    def __init__(self, host, user, logger, priv_key=None):
        """Collect the overcloud node list from the undercloud at *host*.

        Keyword arguments:
        host -- undercloud host to connect to over SSH
        user -- SSH user name
        logger -- logger instance used for all reporting
        priv_key -- optional paramiko private key; when None the key is
                    loaded from ID_RSA_PATH

        Raises IOError when the private key file is missing, OSError when
        'nova list' keeps failing after all retries.
        """
        self.__host = host
        self.__user = user
        self.__passwd = None
        self.__priv_key = priv_key
        self.__nodes = list()
        self.__logger = logger

        self.__private_key_file = ID_RSA_PATH
        if not os.path.isfile(self.__private_key_file):
            self.__logger.error(
                "Private key file '{}'".format(self.__private_key_file)
                + " not found.")
            raise IOError("Private key file '{}' not found.".format(
                self.__private_key_file))

        # get list of available nodes
        ssh, sftp = self.__open_sftp_session(
            self.__host, self.__user, self.__passwd)
        attempt = 1
        fuel_node_passed = False

        # 'nova list' can be flaky right after deployment; retry up to
        # 10 times before giving up.
        while (attempt <= 10) and not fuel_node_passed:
            _, stdout, stderr = ssh.exec_command(
                "source stackrc; nova list")
            stderr_lines = stderr.readlines()
            if stderr_lines:
                self.__logger.warning(
                    "'Apex node' command failed (try {}):".format(attempt))
                for line in stderr_lines:
                    self.__logger.debug(line.strip())
            else:
                fuel_node_passed = True
                if attempt > 1:
                    self.__logger.info(
                        "'Apex node' command passed (try {})".format(attempt))
            attempt += 1
        if not fuel_node_passed:
            self.__logger.error(
                "'Apex node' command failed. This was the last try.")
            raise OSError(
                "'Apex node' command failed. This was the last try.")
        # BUGFIX: dropped a stray line-continuation backslash that glued
        # this statement to the blank line below it.
        node_table = stdout.readlines()

        # skip table title and parse table values
        for entry in node_table[3:]:
            if entry[0] == '+' or entry[0] == '\n':
                # Table separator or empty row; log it for debugging
                # (was a Python 2 'print' statement before).
                self.__logger.debug(entry)
            else:
                self.__nodes.append(
                    Node([str(x.strip(' \n')) for x in entry.split('|')]))

    def get_controllers(self):
        # Get list of controllers
        print self.__nodes[0]._Node__ip
        return (
            [node for node in self.__nodes if 'controller' in node.get_name()])

    def get_computes(self):
        # Get list of computes
        return (
            [node for node in self.__nodes if 'compute' in node.get_name()])

    def get_nodes(self):
        # Get list of nodes
        return self.__nodes

    def __open_sftp_session(self, host, user, passwd=None):
        """Open SSH and SFTP sessions to the given host.

        Keyword arguments:
        host -- host to connect
        user -- user to use
        passwd -- password to use

        Return tuple of SSH and SFTP client instances.
        """
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # No password and no key supplied: fall back to the private key
        # file configured for this server.
        if not passwd and not self.__priv_key:
            self.__priv_key = paramiko.RSAKey.from_private_key_file(
                self.__private_key_file)

        client.connect(
            host, username=user, password=passwd, pkey=self.__priv_key)
        return client, client.open_sftp()

    def get_plugin_interval(self, compute, plugin):
        """Find the plugin interval in collectd configuration.

        Keyword arguments:
        compute -- compute node instance
        plugin -- plug-in name

        If found, return interval value, otherwise the default value"""
        default_interval = DEF_PLUGIN_INTERVAL
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'cat /etc/collectd/collectd.conf.d/{}.conf'.format(plugin))
                if stdout is None:
                    return default_interval
                for line in stdout.split('\n'):
                    if 'Interval' in line:
                        # BUGFIX: the docstring promises the configured
                        # value but the code returned a hard-coded 1.
                        # Parse the 'Interval <seconds>' directive; fall
                        # back to the default when it cannot be parsed.
                        try:
                            return int(line.split()[1].strip('"'))
                        except (IndexError, ValueError):
                            return default_interval
        return default_interval

    def get_plugin_config_values(self, compute, plugin, parameter):
        """Get parameter values from collectd config file.

        Keyword arguments:
        compute -- compute node instance
        plugin -- plug-in name
        parameter -- plug-in parameter (currently unused; the first
                     recognised directive wins)

        Return list of found values."""
        default_values = []
        target = compute.get_name()
        for node in get_apex_nodes():
            if target != node.get_dict()['name']:
                continue
            stdout = node.run_cmd(
                'cat /etc/collectd/collectd.conf.d/{}.conf' .format(plugin))
            if stdout is None:
                return default_values
            # Return the value part of the first recognised directive.
            for line in stdout.split('\n'):
                for keyword in ('Interfaces', 'Bridges', 'Cores'):
                    if keyword in line:
                        return line.split(' ', 1)[1]
        return default_values

    def execute_command(self, command, host_ip=None, ssh=None):
        """Execute command on node and return list of lines of standard output.

        Keyword arguments:
        command -- command
        host_ip -- IP of the node
        ssh -- existing open SSH session to use

        One of host_ip or ssh must not be None. If both are not None,
        existing ssh session is used.
        """
        if host_ip is None and ssh is None:
            raise ValueError('One of host_ip or ssh must not be None.')
        session = ssh
        if session is None:
            # Open a fresh session; the SFTP handle is not needed here.
            session, _ = self.__open_sftp_session(host_ip, 'root', 'opnfvapex')
        _, stdout, _ = session.exec_command(command)
        return stdout.readlines()

    def get_ovs_interfaces(self, compute):
        """Get list of configured OVS bridges on the compute node.

        Keyword arguments:
        compute -- compute node instance

        Returns the 'ovs-vsctl list-br' output, or None when the compute
        node is not found.
        """
        # BUGFIX: stdout was unbound (UnboundLocalError) when no node
        # name matched; initialise it so the miss case returns None.
        stdout = None
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('sudo ovs-vsctl list-br')
        return stdout

    def is_gnocchi_running(self, controller):
        """Check whether Gnocchi is running on controller.

        Keyword arguments:
        controller -- controller node instance

        Return boolean value whether Gnocchi is running.
        """
        target = controller.get_name()
        for node in get_apex_nodes():
            if target != node.get_dict()['name']:
                continue
            # Ship the overcloud credentials so the service catalog can
            # be queried from the controller itself.
            node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
            stdout = node.run_cmd(
                "source overcloudrc.v3;"
                + "openstack catalog list | grep gnocchi")
            if stdout is None:
                return False
            return 'gnocchi' in stdout
        return False

    def is_aodh_running(self, controller):
        """Check whether aodh service is running on controller
        """
        target = controller.get_name()
        for node in get_apex_nodes():
            if target == node.get_dict()['name']:
                # Credentials are needed on the controller to query the
                # service catalog.
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                output = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "openstack catalog list | grep aodh")
                return output is not None and 'aodh' in output
        return False

    def is_redis_running(self, compute):
        """Check whether redis service is running on compute"""
        target = compute.get_name()
        for node in get_apex_nodes():
            if target != node.get_dict()['name']:
                continue
            output = node.run_cmd('sudo systemctl status docker'
                                  '&& sudo docker ps'
                                  '| grep barometer-redis')
            if output and 'barometer-redis' in output:
                self.__logger.info(
                    'Redis is running in node {}'.format(
                     target))
                return True
        self.__logger.info(
            'Redis is *not* running in node {}'.format(
             target))
        return False

    def is_dma_server_running(self, compute):
        """Check whether DMA server is running on compute"""
        target = compute.get_name()
        running = False
        for node in get_apex_nodes():
            if target == node.get_dict()['name']:
                output = node.run_cmd('sudo systemctl status docker'
                                      '&& sudo docker ps'
                                      '| grep opnfv/barometer-dma')
                # The dma container runs either /server or /infofetch;
                # look for the server entry point.
                if output and '/server' in output:
                    running = True
                    break
        if running:
            self.__logger.info(
                'DMA Server is running in node {}'.format(
                 target))
            return True
        self.__logger.info(
            'DMA Server is *not* running in node {}'.format(
             target))
        return False

    def is_dma_infofetch_running(self, compute):
        """Check whether DMA infofetch is running on compute"""
        wanted = compute.get_name()
        matching = (n for n in get_apex_nodes()
                    if wanted == n.get_dict()['name'])
        for node in matching:
            output = node.run_cmd('sudo systemctl status docker'
                                  '&& sudo docker ps'
                                  '| grep opnfv/barometer-dma')
            # The dma container runs either /server or /infofetch;
            # look for the infofetch entry point.
            if output and '/infofetch' in output:
                self.__logger.info(
                    'DMA InfoFetch is running in node {}'.format(
                     wanted))
                return True
        self.__logger.info(
            'DMA InfoFetch is *not* running in node {}'.format(
             wanted))
        return False

    def get_dma_config(self, compute):
        """Get config values of DMA"""
        target = compute.get_name()
        for node in get_apex_nodes():
            if target != node.get_dict()['name']:
                continue
            # NOTE: once functest ships python-toml this should simply
            # toml.loads() /etc/barometer-dma/config.toml; until then the
            # relevant keys are grepped out, rewritten as 'key: value'
            # and fed to the YAML parser.
            readcmd = (
                'egrep "listen_port|amqp_"'
                ' /etc/barometer-dma/config.toml'
                '| sed -e "s/#.*$//" | sed -e "s/=/:/"'
                )
            agent_conf = {"server": yaml.safe_load(node.run_cmd(readcmd))}

            # Replace the configured amqp_host name with the address that
            # actually answered a ping from the node.
            pingcmd = (
                'ping -n -c1 ' + agent_conf["server"]["amqp_host"] +
                '| sed -ne "s/^.*bytes from //p" | sed -e "s/:.*//"'
                )
            agent_conf["server"]["amqp_host"] = node.run_cmd(pingcmd)
            return agent_conf
        return None

    def is_mcelog_installed(self, compute, package):
        """Check whether package exists on compute node.

        Keyword arguments:
        compute -- compute node instance
        package -- Linux package to search for
                   NOTE(review): currently unused -- the command below
                   greps for 'mcelog' regardless; confirm with callers
                   before generalising.

        Return boolean value whether package is installed.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'rpm -qa | grep mcelog')
                # BUGFIX: return real booleans as documented (the ints
                # 0/1 were returned before).
                return stdout is not None and 'mcelog' in stdout
        # BUGFIX: fell off the end with an implicit None when the compute
        # node was not found; report 'not installed' explicitly.
        return False

    def is_rdt_available(self, compute):
        """Check whether the compute node is a virtual machine.

        Keyword arguments:
        compute -- compute node instance

        Return False when /proc/cpuinfo reports the 'hypervisor' flag
        (the node is a VM, so RDT is unavailable), True otherwise.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('cat /proc/cpuinfo | grep hypervisor')
                # run_cmd may return None when grep matches nothing;
                # guard before the substring test to avoid a TypeError.
                if stdout and 'hypervisor' in stdout:
                    return False
        return True

    def is_libpqos_on_node(self, compute):
        """Check whether libpqos is present on compute node.

        Keyword arguments:
        compute -- compute node instance

        Return True when a libpqos library file is found in
        /usr/local/lib, False otherwise.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd('ls /usr/local/lib/ | grep libpqos')
                # run_cmd may return None when grep matches nothing;
                # guard before the substring test to avoid a TypeError.
                if stdout and 'libpqos' in stdout:
                    return True
        return False

    def check_aodh_plugin_included(self, compute):
        """Check if aodh plugin config is included on a compute node.

        Looks for 'aodh.conf' in /etc/collectd/collectd.conf.d. This
        method only checks; it does not attempt to enable the plugin.

        Keyword arguments:
        compute -- compute node instance

        Return boolean value whether AODH plugin config is present.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                aodh_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
                # run_cmd may return None for an empty listing; treat
                # that the same as the config file being absent.
                if not aodh_conf or 'aodh.conf' not in aodh_conf:
                    self.__logger.info(
                        "AODH Plugin not included in {}".format(compute_name))
                    return False
                else:
                    self.__logger.info(
                        "AODH plugin present in compute node {}" .format(
                            compute_name))
                    return True
        return True

    def check_gnocchi_plugin_included(self, compute):
        """Check if gnocchi plugin config is included on a compute node.

        Looks for 'collectd-ceilometer-plugin.conf' in
        /etc/collectd/collectd.conf.d. This method only checks; it does
        not attempt to enable the plugin.

        Keyword arguments:
        compute -- compute node instance

        Return boolean value whether gnocchi plugin config is present.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                gnocchi_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
                # run_cmd may return None for an empty listing; treat
                # that the same as the config file being absent.
                if (not gnocchi_conf
                        or 'collectd-ceilometer-plugin.conf'
                        not in gnocchi_conf):
                    self.__logger.info(
                        "Gnocchi Plugin not included in node {}".format(
                            compute_name))
                    return False
                else:
                    self.__logger.info(
                        "Gnocchi plugin available in compute node {}" .format(
                            compute_name))
                    return True
        return True

    def check_snmp_plugin_included(self, compute):
        """Check if SNMP plugin is active in compute node.

        Runs snmpwalk against the Intel RDT MIB on the compute node.

        Keyword arguments:
        compute -- compute node instance

        Return True when the snmpwalk query succeeds, False otherwise
        (including when the compute node is not found).
        """
        snmp_mib = '/usr/share/snmp/mibs/Intel-Rdt.txt'
        snmp_string = 'INTEL-RDT-MIB::intelRdt'
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                        snmp_mib, snmp_string))
                self.__logger.info("snmp output = {}" .format(stdout))
                # run_cmd may return None; previously 'OID' in None
                # raised a TypeError. Treat no output as failure.
                if stdout is None:
                    return False
                # snmpwalk reports 'No Such Object ... OID' on failure.
                return 'OID' not in stdout
        # Bug fix: previously fell off the end and returned None.
        return False

    def enable_plugins(
            self, compute, plugins, error_plugins, create_backup=True):
        """Deploy the bundled CSV collectd config to a compute node.

        Despite its name, this method only copies csv.conf into
        /etc/collectd/collectd.conf.d. The 'plugins', 'error_plugins'
        and 'create_backup' arguments are retained for interface
        compatibility but are not used.

        Keyword arguments:
        compute -- compute node instance
        plugins -- list of plugins to be enabled (unused)
        error_plugins -- collection for plugin errors (unused)
        create_backup -- whether to back up config first (unused)

        Return boolean value indicating whether function was successful.
        """
        # csv.conf ships next to this source file.
        csv_file = os.path.dirname(os.path.realpath(__file__)) + '/csv.conf'
        compute_name = compute.get_name()
        nodes = get_apex_nodes()
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                node.put_file(csv_file, 'csv.conf')
                node.run_cmd(
                    'sudo cp csv.conf '
                    + '/etc/collectd/collectd.conf.d/csv.conf')
        return True

    def restart_collectd(self, compute):
        """Restart collectd on compute node.

        Keyword arguments:
        compute -- compute node instance

        Return tuple with boolean indicating success and list of
        warnings received during collectd start.
        """
        compute_name = compute.get_name()
        nodes = get_apex_nodes()

        def get_collectd_processes(compute_node):
            """Return truthy output length of 'pgrep collectd' (0 if none)."""
            stdout = compute_node.run_cmd("pgrep collectd")
            # run_cmd may return None; len(None) would raise TypeError.
            return len(stdout) if stdout else 0

        # Bug fix: initialize before the loop so the final return cannot
        # raise NameError when the compute node is not found.
        warning = []
        for node in nodes:
            if compute_name == node.get_dict()['name']:
                self.__logger.info('Stopping collectd service...')
                node.run_cmd('sudo systemctl stop collectd')
                time.sleep(10)
                if get_collectd_processes(node):
                    self.__logger.error('Collectd is still running...')
                    return False, []
                self.__logger.info('Starting collectd service...')
                stdout = node.run_cmd('sudo systemctl start collectd')
                time.sleep(10)
                # Bug fix: iterating a string yields characters, so the
                # 'WARN: ' test could never match; scan lines instead,
                # tolerating None output.
                warning = [
                    line.strip() for line in (stdout or '').splitlines()
                    if 'WARN: ' in line]
                if get_collectd_processes(node) == 0:
                    self.__logger.error('Collectd is still not running...')
                    return False, warning
        return True, warning

    def trigger_alarm_update(self, alarm, compute_node):
        """Poke the event source for the given alarm on a compute node.

        'mcelog' injects a corrected machine-check error; 'ovs_events'
        bounces the br0 interface.
        """
        # TODO: move these actions to main, with criteria lists so that we can reference that
        # i.e. test_plugin_with_aodh(self, compute, plugin.., logger, criteria_list, alarm_action)
        if alarm == 'mcelog':
            for command in ('sudo modprobe mce-inject',
                            'sudo ./mce-inject_ea < corrected'):
                compute_node.run_cmd(command)
        elif alarm == 'ovs_events':
            compute_node.run_cmd('sudo ifconfig -a | grep br0')
            compute_node.run_cmd('sudo ifconfig br0 down; sudo ifconfig br0 up')

    def test_plugins_with_aodh(
            self, compute, plugin_interval, logger,
            criteria_list=None):
        """Check that an AODH alarm's state timestamp gets updated.

        Triggers the alarm, reads each matching alarm's state_timestamp,
        re-triggers the alarm, waits 12 seconds, and compares.

        Keyword arguments:
        compute -- name of the compute node the alarm relates to
        plugin_interval -- plugin collection interval (not used here)
        logger -- logger instance (kept for interface compatibility)
        criteria_list -- pattern to grep for in 'aodh alarm list'

        Return True when the timestamp changed, False on any failure.
        """
        # Bug fix: avoid the shared mutable default argument; an empty
        # list is still formatted into the grep when nothing is given.
        if criteria_list is None:
            criteria_list = []
        timestamps1 = {}
        timestamps2 = {}
        nodes = get_apex_nodes()
        compute_node = [node for node in nodes
                        if node.get_dict()['name'] == compute][0]
        for node in nodes:
            if node.is_controller():
                self.__logger.info('Getting AODH Alarm list on {}' .format(
                    (node.get_dict()['name'])))
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                self.trigger_alarm_update(criteria_list, compute_node)
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "aodh alarm list | grep {0} | grep {1}"
                    .format(criteria_list, compute))
                if stdout is None:
                    self.__logger.info("aodh alarm list was empty")
                    return False
                for line in stdout.splitlines():
                    line = line.replace('|', "")
                    metric_id = line.split()[0]
                    stdout = node.run_cmd(
                        'source overcloudrc.v3; aodh alarm show {}' .format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("aodh alarm list was empty")
                        return False
                    # Skip the table header and closing rule of the CLI
                    # output, then pick out the state_timestamp row.
                    for line in stdout.splitlines()[3: -1]:
                        line = line.replace('|', "")
                        if line.split()[0] == 'state_timestamp':
                            timestamps1 = line.split()[1]
                    self.trigger_alarm_update(criteria_list, compute_node)
                    time.sleep(12)
                    stdout = node.run_cmd(
                        "source overcloudrc.v3; aodh alarm show {}" .format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("aodh alarm list was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        line = line.replace('|', "")
                        if line.split()[0] == 'state_timestamp':
                            timestamps2 = line.split()[1]
                    if timestamps1 == timestamps2:
                        self.__logger.info(
                            "Data not updated after interval of 12 seconds")
                        return False
                    else:
                        self.__logger.info("PASS")
                        return True

    def test_plugins_with_gnocchi(
            self, compute, plugin_interval, logger,
            criteria_list=None):
        """Check that gnocchi measures for a metric are being updated.

        Keyword arguments:
        compute -- name of the compute node publishing the metric
        plugin_interval -- plugin collection interval, drives the wait
        logger -- logger instance (kept for interface compatibility)
        criteria_list -- pattern to grep for in 'gnocchi metric list'

        Return True when the latest measure timestamp changed after
        waiting, False otherwise.
        """
        # Bug fix: avoid the shared mutable default argument.
        if criteria_list is None:
            criteria_list = []
        timestamps1 = {}
        timestamps2 = {}
        nodes = get_apex_nodes()
        # Wait at least two collection intervals (minimum 30 seconds)
        # so a new measure has a chance to arrive.
        if plugin_interval > 15:
            sleep_time = plugin_interval*2
        else:
            sleep_time = 30

        for node in nodes:
            if node.is_controller():
                self.__logger.info('Getting gnocchi metric list on {}' .format(
                    (node.get_dict()['name'])))
                node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
                stdout = node.run_cmd(
                    "source overcloudrc.v3;"
                    + "gnocchi metric list | grep {0} | grep {1}"
                    .format(criteria_list, compute))
                if stdout is None:
                    self.__logger.info("gnocchi list was empty")
                    return False
                for line in stdout.splitlines():
                    line = line.replace('|', "")
                    metric_id = line.split()[0]
                    stdout = node.run_cmd(
                        'source overcloudrc.v3;gnocchi measures show {}'.format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("gnocchi list was empty")
                        return False
                    # Skip the table header/footer; '+' lines are table
                    # borders. The last data row wins.
                    for line in stdout.splitlines()[3: -1]:
                        if line[0] == '+':
                            pass
                        else:
                            timestamps1 = line.replace('|', "")
                            timestamps1 = timestamps1.split()[0]
                    time.sleep(sleep_time)
                    stdout = node.run_cmd(
                        "source overcloudrc.v3;gnocchi measures show {}".format(
                            metric_id))
                    if stdout is None:
                        self.__logger.info("gnocchi measures was empty")
                        return False
                    for line in stdout.splitlines()[3:-1]:
                        if line[0] == '+':
                            pass
                        else:
                            timestamps2 = line.replace('|', "")
                            timestamps2 = timestamps2.split()[0]
                    if timestamps1 == timestamps2:
                        self.__logger.info(
                            "Plugin Interval is {}" .format(plugin_interval))
                        self.__logger.info(
                            "Data not updated after {} seconds".format(
                                sleep_time))
                        return False
                    else:
                        self.__logger.info("PASS")
                        return True
        return False

    def test_plugins_with_snmp(
            self, compute, plugin_interval, logger, plugin,
            snmp_mib_files=None, snmp_mib_strings=None,
            snmp_in_commands=None):
        """Check that an SNMP counter for a plugin is being updated.

        Walks the given MIB twice, ten seconds apart, and compares the
        returned counter values.

        Keyword arguments:
        compute -- name of the compute node to query
        plugin_interval -- plugin collection interval (not used here)
        logger -- logger instance (kept for interface compatibility)
        plugin -- plugin under test; only hugepages/intel_rdt/mcelog
            are supported
        snmp_mib_files -- MIB file path(s) passed to snmpwalk -m
        snmp_mib_strings -- OID string(s) to walk
        snmp_in_commands -- unused, kept for interface compatibility

        Return True when the counter changed between queries, False
        otherwise.
        """
        # Bug fix: avoid shared mutable default arguments; empty lists
        # preserve the previous formatting behavior.
        if snmp_mib_files is None:
            snmp_mib_files = []
        if snmp_mib_strings is None:
            snmp_mib_strings = []
        if snmp_in_commands is None:
            snmp_in_commands = []
        if plugin not in ('hugepages', 'intel_rdt', 'mcelog'):
            return False
        nodes = get_apex_nodes()
        for node in nodes:
            if compute == node.get_dict()['name']:
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}" .format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                    return False
                if 'OID' in stdout:
                    self.__logger.info("SNMP query failed")
                    return False
                counter1 = stdout.split()[3]
                time.sleep(10)
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}" .format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                elif 'OID' in stdout:
                    self.__logger.info(
                        "SNMP query failed during second check")
                    self.__logger.info("waiting for 10 sec")
                    time.sleep(10)
                stdout = node.run_cmd(
                    'snmpwalk -v2c -m {0} -c public localhost {1}' .format(
                        snmp_mib_files, snmp_mib_strings))
                self.__logger.info("{}" .format(stdout))
                if stdout is None:
                    self.__logger.info("No output from snmpwalk")
                    # Bug fix: previously fell through and compared an
                    # unbound 'counter2', raising NameError.
                    return False
                if 'OID' in stdout:
                    self.__logger.info("SNMP query failed again")
                    self.__logger.info("Failing this test case")
                    return False
                counter2 = stdout.split()[3]
                # Counters should advance between the two queries.
                return counter1 != counter2
        return False

    def check_dma_dummy_included(self, compute, name):
        """Check if dummy collectd config by DMA
           is included in collectd.conf file.

        Keyword arguments:
        compute -- compute node instance
        name -- config file name
        """
        target_name = compute.get_name()
        for node in get_apex_nodes():
            if node.get_dict()['name'] != target_name:
                continue
            listing = node.run_cmd('ls /etc/collectd/collectd.conf.d')
            if name + '.conf' not in listing:
                self.__logger.error('check conf FAIL')
                return False
            self.__logger.info('check conf PASS')
            # Clean up the dummy config once its presence is confirmed.
            fullpath = '/etc/collectd/collectd.conf.d/{}'.format(
                       name + '.conf')
            self.__logger.info('Delete file {}'.format(fullpath))
            node.run_cmd('sudo rm -f ' + fullpath)
            return True
        self.__logger.error('Some panic, compute not found')
        return False

    def create_testvm(self, compute_node, test_name):
        """Create a test image, flavor and server on a compute node.

        Keyword arguments:
        compute_node -- compute node instance the VM must land on
        test_name -- base name for the created OpenStack resources

        Return dict with the created 'image', 'flavor' and 'server' ids.
        """
        nodes = get_apex_nodes()
        compute_name = compute_node.get_name()

        # OpenStack CLI commands are executed on the controller node.
        controller_node = None
        for node in nodes:
            if node.is_controller():
                controller_node = node
                break

        self.__logger.debug('Creating Test VM on {}' .format(compute_name))
        self.__logger.debug('Create command is executed in {}' .format(
            (controller_node.get_dict()['name'])))

        # Bug fix: upload to the controller node that runs the commands,
        # not 'node' -- the stale loop variable left by the search above,
        # which was often not the controller.
        controller_node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
        controller_node.put_file(TEST_VM_IMAGE_PATH, TEST_VM_IMAGE)
        image = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack image create -f value -c id'
            ' --disk-format qcow2 --file {0} {1}'
            .format(TEST_VM_IMAGE, test_name))
        flavor = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack flavor create -f value -c id {}'
            .format(test_name))
        # Resolve the full hypervisor hostname for the requested compute.
        host = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack hypervisor list -f value -c "Hypervisor Hostname"'
            ' | grep "^{}\\."'
            .format(compute_name))
        server = controller_node.run_cmd(
            'source overcloudrc.v3;'
            'openstack server create -f value -c id'
            ' --image {0} --flavor {1} --availability-zone {2} {3}'
            .format(image, flavor, 'nova:' + host, test_name))

        resources = {"image": image, "flavor": flavor, "server": server}

        if server:
            self.__logger.debug('VM created')
        self.__logger.debug('VM info: {}'.format(resources))

        return resources

    def delete_testvm(self, resources):
        """Delete the OpenStack resources created by create_testvm.

        Keyword arguments:
        resources -- dict with optional 'server', 'flavor' and 'image'
            ids; missing or falsy entries are skipped.
        """
        nodes = get_apex_nodes()

        # OpenStack CLI commands are executed on the controller node.
        controller_node = None
        for node in nodes:
            if node.is_controller():
                controller_node = node
                break

        # Typo fix in the log message ('Deleteing' -> 'Deleting').
        self.__logger.debug('Deleting Test VM')
        self.__logger.debug('VM to be deleted info: {}'.format(resources))
        self.__logger.debug('Delete command is executed in {}' .format(
            (controller_node.get_dict()['name'])))

        server = resources.get('server', None)
        flavor = resources.get('flavor', None)
        image = resources.get('image', None)
        if server:
            controller_node.run_cmd(
                'source overcloudrc.v3;'
                'openstack server delete {}'.format(server))
        if flavor:
            controller_node.run_cmd(
                'source overcloudrc.v3;'
                'openstack flavor delete {}'.format(flavor))
        if image:
            controller_node.run_cmd(
                'source overcloudrc.v3;'
                'openstack image delete {}'.format(image))

        self.__logger.debug('VM and other OpenStack resources deleted')

    def test_dma_infofetch_get_data(self, compute, test_name):
        """Verify DMA InfoFetch stored the test VM's info in redis.

        Keyword arguments:
        compute -- compute node instance
        test_name -- VM name expected in the stored vminfo records

        Return True when a matching record is found, False otherwise.
        """
        target_name = compute.get_name()
        for node in get_apex_nodes():
            if node.get_dict()['name'] != target_name:
                continue
            # Dump every vminfo record and filter for the test VM name.
            stdout = node.run_cmd(
                'redis-cli keys "barometer-dma/vm/*/vminfo"'
                ' | while read k; do redis-cli get $k; done'
                ' | grep {}'.format(test_name))
            self.__logger.debug('InfoFetch data: {}'.format(stdout))
            if stdout and test_name in stdout:
                self.__logger.info('PASS')
                return True
            self.__logger.info('No test vm info')

        self.__logger.info('FAIL')
        return False