path: root/tosca2heat/heat-translator/translator/tests/data/hot_output/hot_script_types.yaml
heat_template_version: 2013-05-23

description: >
  TOSCA template to test the usage of different script types, such as
  Ansible and Puppet.

parameters: {}
resources:
  customwebserver2_create_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: customwebserver2_create_config
      server:
        get_resource: server
  customwebserver_create_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: customwebserver_create_config
      server:
        get_resource: server
  server:
    type: OS::Nova::Server
    properties:
      flavor: m1.small
      image: ubuntu-12.04-software-config-os-init
      user_data_format: SOFTWARE_CONFIG
  customwebserver2_start_config:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        get_file: start.sh
      group: script
  customwebserver2_start_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: customwebserver2_start_config
      server:
        get_resource: server
    depends_on:
    - customwebserver2_configure_deploy
  customwebserver2_create_config:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        get_file: install.sh
      group: script
  customwebserver2_configure_config:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        get_file: configure.py
      group: script
  customwebserver2_configure_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: customwebserver2_configure_config
      server:
        get_resource: server
    depends_on:
    - customwebserver2_create_deploy
  customwebserver_start_config:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        get_file: start.pp
      group: puppet
  customwebserver_start_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: customwebserver_start_config
      server:
        get_resource: server
    depends_on:
    - customwebserver_configure_deploy
  customwebserver_create_config:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        get_file: install.yaml
      group: ansible
  customwebserver_configure_config:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        get_file: configure.yml
      group: ansible
  customwebserver_configure_deploy:
    type: OS::Heat::SoftwareDeployment
    properties:
      config:
        get_resource: customwebserver_configure_config
      server:
        get_resource: server
    depends_on:
    - customwebserver_create_deploy
outputs: {}
# Copyright 2016-2017 Spirent Communications.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The file name is invalid for a Python module: '_' must be used instead of '-'.
# pylint: disable=invalid-name
'''
@author Spirent Communications

This script automates RFC 2544 tests using the Spirent
TestCenter REST APIs. It supports Python 3.4.

'''
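# Example invocation (a minimal sketch; the addresses, slot and port
# numbers are illustrative placeholders, and "testcenter_rfc2544_rest.py"
# stands in for whatever name this file is saved under):
#
#   python3 testcenter_rfc2544_rest.py \
#       --lab_server_addr 10.0.0.10 \
#       --license_server_addr 10.0.0.11 \
#       --east_chassis_addr 10.0.0.20 --east_slot_num 1 --east_port_num 1 \
#       --west_chassis_addr 10.0.0.20 --west_slot_num 1 --west_port_num 2 \
#       --metric throughput --frame_size_list 64,256 --num_trials 1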
import argparse
import logging
import os
from conf import settings


_LOGGER = logging.getLogger(__name__)


def create_dir(path):
    """Create the directory as specified in path """
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as ex:
            _LOGGER.error("Failed to create directory %s: %s", path, str(ex))
            raise


def write_query_results_to_csv(results_path, csv_results_file_prefix,
                               query_results):
    """ Write the results of the query to the CSV """
    create_dir(results_path)
    filec = os.path.join(results_path, csv_results_file_prefix + ".csv")
    with open(filec, "wb") as result_file:
        result_file.write(query_results["Columns"].replace(" ", ",") + "\n")
        for row in (query_results["Output"].replace("} {", ",").
                    replace("{", "").replace("}", "").split(",")):
            result_file.write(row.replace(" ", ",") + "\n")
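# For example (illustrative data only), a query result of
#   {"Columns": "FrameSize Load", "Output": "{256 99.0} {512 98.5}"}
# is written out as the CSV:
#   FrameSize,Load
#   256,99.0
#   512,98.5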


def positive_int(value):
    """ Positive Integer type for Arguments """
    ivalue = int(value)
    if ivalue <= 0:
        raise argparse.ArgumentTypeError(
            "%s is an invalid positive int value" % value)
    return ivalue


def percent_float(value):
    """ Floating type for Arguments """
    pvalue = float(value)
    if pvalue < 0.0 or pvalue > 100.0:
        raise argparse.ArgumentTypeError(
            "%s not in range [0.0, 100.0]" % pvalue)
    return pvalue

# pylint: disable=too-many-branches, too-many-statements
def main():
    """ Read the arguments, Invoke Test and Return the results"""
    parser = argparse.ArgumentParser()
    # Required parameters
    required_named = parser.add_argument_group("required named arguments")
    required_named.add_argument("--lab_server_addr",
                                required=True,
                                help=("The IP address of the"
                                      "Spirent Lab Server"),
                                dest="lab_server_addr")
    required_named.add_argument("--license_server_addr",
                                required=True,
                                help=("The IP address of the Spirent"
                                      "License Server"),
                                dest="license_server_addr")
    required_named.add_argument("--east_chassis_addr",
                                required=True,
                                help=("The TestCenter chassis IP address to"
                                      "use for the east test port"),
                                dest="east_chassis_addr")
    required_named.add_argument("--east_slot_num",
                                type=positive_int,
                                required=True,
                                help=("The TestCenter slot number to"
                                      "use for the east test port"),
                                dest="east_slot_num")
    required_named.add_argument("--east_port_num",
                                type=positive_int,
                                required=True,
                                help=("The TestCenter port number to use"
                                      "for the east test port"),
                                dest="east_port_num")
    required_named.add_argument("--west_chassis_addr",
                                required=True,
                                help=("The TestCenter chassis IP address"
                                      "to use for the west test port"),
                                dest="west_chassis_addr")
    required_named.add_argument("--west_slot_num",
                                type=positive_int,
                                required=True,
                                help=("The TestCenter slot number to use"
                                      "for the west test port"),
                                dest="west_slot_num")
    required_named.add_argument("--west_port_num",
                                type=positive_int,
                                required=True,
                                help=("The TestCenter port number to"
                                      "use for the west test port"),
                                dest="west_port_num")
    # Optional parameters
    optional_named = parser.add_argument_group("optional named arguments")
    optional_named.add_argument("--metric",
                                required=False,
                                help=("One among - throughput, latency,\
                                      backtoback and frameloss"),
                                choices=["throughput", "latency",
                                         "backtoback", "frameloss"],
                                default="throughput",
                                dest="metric")
    optional_named.add_argument("--test_session_name",
                                required=False,
                                default="RFC2544 East-West Throughput",
                                help=("The friendly name to identify"
                                      "the Spirent Lab Server test session"),
                                dest="test_session_name")

    optional_named.add_argument("--test_user_name",
                                required=False,
                                default="RFC2544 East-West User",
                                help=("The friendly name to identify the"
                                      "Spirent Lab Server test user"),
                                dest="test_user_name")
    optional_named.add_argument("--results_dir",
                                required=False,
                                default=settings.getValue("TRAFFICGEN_STC_RESULTS_DIR"),
                                help="The directory to copy results to",
                                dest="results_dir")
    optional_named.add_argument("--csv_results_file_prefix",
                                required=False,
                                default="Rfc2544Tput",
                                help="The prefix for the CSV results files",
                                dest="csv_results_file_prefix")
    optional_named.add_argument("--num_trials",
                                type=positive_int,
                                required=False,
                                default=1,
                                help=("The number of trials to execute during"
                                      "the test"),
                                dest="num_trials")
    optional_named.add_argument("--trial_duration_sec",
                                type=positive_int,
                                required=False,
                                default=60,
                                help=("The duration of each trial executed"
                                      "during the test"),
                                dest="trial_duration_sec")
    optional_named.add_argument("--traffic_pattern",
                                required=False,
                                choices=["BACKBONE", "MESH", "PAIR"],
                                default="PAIR",
                                help="The traffic pattern between endpoints",
                                dest="traffic_pattern")
    optional_named.add_argument("--traffic_custom",
                                required=False,
                                default=None,
                                help="The traffic pattern between endpoints",
                                dest="traffic_custom")
    optional_named.add_argument("--search_mode",
                                required=False,
                                choices=["COMBO", "STEP", "BINARY"],
                                default="BINARY",
                                help=("The search mode used to find the"
                                      "throughput rate"),
                                dest="search_mode")
    optional_named.add_argument("--learning_mode",
                                required=False,
                                choices=["AUTO", "L2_LEARNING",
                                         "L3_LEARNING", "NONE"],
                                default="AUTO",
                                help=("The learning mode used during the test,"
                                      "default is 'NONE'"),
                                dest="learning_mode")
    optional_named.add_argument("--rate_lower_limit_pct",
                                type=percent_float,
                                required=False,
                                default=1.0,
                                help=("The minimum percent line rate that"
                                      "will be used during the test"),
                                dest="rate_lower_limit_pct")
    optional_named.add_argument("--rate_upper_limit_pct",
                                type=percent_float,
                                required=False,
                                default=99.0,
                                help=("The maximum percent line rate that"
                                      "will be used during the test"),
                                dest="rate_upper_limit_pct")
    optional_named.add_argument("--rate_initial_pct",
                                type=percent_float,
                                required=False,
                                default=99.0,
                                help=("If Search Mode is BINARY, the percent"
                                      "line rate that will be used at the"
                                      "start of the test"),
                                dest="rate_initial_pct")
    optional_named.add_argument("--rate_step_pct",
                                type=percent_float,
                                required=False,
                                default=10.0,
                                help=("If SearchMode is STEP, the percent"
                                      "load increase per step"),
                                dest="rate_step_pct")
    optional_named.add_argument("--resolution_pct",
                                type=percent_float,
                                required=False,
                                default=1.0,
                                help=("The minimum percentage of load"
                                      "adjustment between iterations"),
                                dest="resolution_pct")
    optional_named.add_argument("--frame_size_list",
                                type=lambda s: [int(item)
                                                for item in s.split(',')],
                                required=False,
                                default=[256],
                                help="A comma-delimited list of frame sizes",
                                dest="frame_size_list")
    optional_named.add_argument("--acceptable_frame_loss_pct",
                                type=percent_float,
                                required=False,
                                default=0.0,
                                help=("The maximum acceptable frame loss"
                                      "percent in any iteration"),
                                dest="acceptable_frame_loss_pct")
    optional_named.add_argument("--east_intf_addr",
                                required=False,
                                default="192.85.1.3",
                                help=("The address to assign to the first"
                                      "emulated device interface on the first"
                                      "east port"),
                                dest="east_intf_addr")
    optional_named.add_argument("--east_intf_gateway_addr",
                                required=False,
                                default="192.85.1.53",
                                help=("The gateway address to assign to the"
                                      "first emulated device interface on the"
                                      "first east port"),
                                dest="east_intf_gateway_addr")
    optional_named.add_argument("--west_intf_addr",
                                required=False,
                                default="192.85.1.53",
                                help=("The address to assign to the first"
                                      "emulated device interface on the"
                                      "first west port"),
                                dest="west_intf_addr")
    optional_named.add_argument("--west_intf_gateway_addr",
                                required=False,
                                default="192.85.1.53",
                                help=("The gateway address to assign to"
                                      "the first emulated device interface"
                                      "on the first west port"),
                                dest="west_intf_gateway_addr")
    parser.add_argument("-v",
                        "--verbose",
                        required=False,
                        default=True,
                        help="More output during operation when present",
                        action="store_true",
                        dest="verbose")
    args = parser.parse_args()

    if args.verbose:
        _LOGGER.debug("Creating results directory")
    create_dir(args.results_dir)

    session_name = args.test_session_name
    user_name = args.test_user_name
    # pylint: disable=import-error
    try:
        # Load Spirent REST Library
        from stcrestclient import stchttp

        stc = stchttp.StcHttp(args.lab_server_addr)
        session_id = stc.new_session(user_name, session_name)
        stc.join_session(session_id)
    except (ImportError, RuntimeError) as err:
        _LOGGER.error(err)
        raise

    # Build the Spirent port locations in the //chassis/slot/port format.
    tx_port_loc = "//%s/%s/%s" % (args.east_chassis_addr,
                                  args.east_slot_num,
                                  args.east_port_num)
    rx_port_loc = "//%s/%s/%s" % (args.west_chassis_addr,
                                  args.west_slot_num,
                                  args.west_port_num)
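    # For example (illustrative values), an east chassis at 10.0.0.20,
    # slot 1, port 1 yields the location string "//10.0.0.20/1/1".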

    # Retrieve and display the server information
    if args.verbose:
        _LOGGER.debug("SpirentTestCenter system version: %s",
                      stc.get("system1", "version"))

    try:
        device_list = []
        port_list = []
        if args.verbose:
            _LOGGER.debug("Bring up license server")
        license_mgr = stc.get("system1", "children-licenseservermanager")
        if args.verbose:
            _LOGGER.debug("license_mgr = %s", license_mgr)
        stc.create("LicenseServer", under=license_mgr, attributes={
            "server": args.license_server_addr})

        # Create the root project object
        if args.verbose:
            _LOGGER.debug("Creating project ...")
        project = stc.get("System1", "children-Project")

        # Configure any custom traffic parameters
        if args.traffic_custom == "cont":
            if args.verbose:
                _LOGGER.debug("Configure Continuous Traffic")
            stc.create("ContinuousTestConfig", under=project)

        # Create ports
        if args.verbose:
            _LOGGER.debug("Creating ports ...")
        east_chassis_port = stc.create('port', project)
        if args.verbose:
            _LOGGER.debug("Configuring TX port ...")
        stc.config(east_chassis_port, {'location': tx_port_loc})
        port_list.append(east_chassis_port)

        west_chassis_port = stc.create('port', project)
        if args.verbose:
            _LOGGER.debug("Configuring RX port ...")
        stc.config(west_chassis_port, {'location': rx_port_loc})
        port_list.append(west_chassis_port)

        # Create emulated genparam for east port
        east_device_gen_params = stc.create("EmulatedDeviceGenParams",
                                            under=project,
                                            attributes={"Port":
                                                        east_chassis_port})
        # Create the DeviceGenEthIIIfParams object
        stc.create("DeviceGenEthIIIfParams",
                   under=east_device_gen_params)
        # Configuring Ipv4 interfaces
        stc.create("DeviceGenIpv4IfParams",
                   under=east_device_gen_params,
                   attributes={"Addr": args.east_intf_addr,
                               "Gateway": args.east_intf_gateway_addr})
        # Create Devices using the Device Wizard
        device_gen_config = stc.perform("DeviceGenConfigExpand",
                                        params={"DeleteExisting": "No",
                                                "GenParams":
                                                east_device_gen_params})
        # Append to the device list
        device_list.append(device_gen_config['ReturnList'])

        # Create emulated genparam for west port
        west_device_gen_params = stc.create("EmulatedDeviceGenParams",
                                            under=project,
                                            attributes={"Port":
                                                        west_chassis_port})
        # Create the DeviceGenEthIIIfParams object
        stc.create("DeviceGenEthIIIfParams",
                   under=west_device_gen_params)
        # Configuring Ipv4 interfaces
        stc.create("DeviceGenIpv4IfParams",
                   under=west_device_gen_params,
                   attributes={"Addr": args.west_intf_addr,
                               "Gateway": args.west_intf_gateway_addr})
        # Create Devices using the Device Wizard
        device_gen_config = stc.perform("DeviceGenConfigExpand",
                                        params={"DeleteExisting": "No",
                                                "GenParams":
                                                west_device_gen_params})
        # Append to the device list
        device_list.append(device_gen_config['ReturnList'])
        if args.verbose:
            _LOGGER.debug(device_list)

        # Create the RFC 2544 test for the selected metric
        if args.metric == "throughput":
            if args.verbose:
                _LOGGER.debug("Set up the RFC2544 throughput test...")
            stc.perform("Rfc2544SetupThroughputTestCommand",
                        params={"AcceptableFrameLoss":
                                args.acceptable_frame_loss_pct,
                                "Duration": args.trial_duration_sec,
                                "FrameSizeList": args.frame_size_list,
                                "LearningMode": args.learning_mode,
                                "NumOfTrials": args.num_trials,
                                "RateInitial": args.rate_initial_pct,
                                "RateLowerLimit": args.rate_lower_limit_pct,
                                "RateStep": args.rate_step_pct,
                                "RateUpperLimit": args.rate_upper_limit_pct,
                                "Resolution": args.resolution_pct,
                                "SearchMode": args.search_mode,
                                "TrafficPattern": args.traffic_pattern})
        elif args.metric == "backtoback":
            stc.perform("Rfc2544SetupBackToBackTestCommand",
                        params={"AcceptableFrameLoss":
                                args.acceptable_frame_loss_pct,
                                "Duration": args.trial_duration_sec,
                                "FrameSizeList": args.frame_size_list,
                                "LearningMode": args.learning_mode,
                                "LatencyType": args.latency_type,
                                "NumOfTrials": args.num_trials,
                                "RateInitial": args.rate_initial_pct,
                                "RateLowerLimit": args.rate_lower_limit_pct,
                                "RateStep": args.rate_step_pct,
                                "RateUpperLimit": args.rate_upper_limit_pct,
                                "Resolution": args.resolution_pct,
                                "SearchMode": args.search_mode,
                                "TrafficPattern": args.traffic_pattern})
        elif args.metric == "frameloss":
            stc.perform("Rfc2544SetupFrameLossTestCommand",
                        params={"AcceptableFrameLoss":
                                args.acceptable_frame_loss_pct,
                                "Duration": args.trial_duration_sec,
                                "FrameSizeList": args.frame_size_list,
                                "LearningMode": args.learning_mode,
                                "LatencyType": args.latency_type,
                                "NumOfTrials": args.num_trials,
                                "RateInitial": args.rate_initial_pct,
                                "RateLowerLimit": args.rate_lower_limit_pct,
                                "RateStep": args.rate_step_pct,
                                "RateUpperLimit": args.rate_upper_limit_pct,
                                "Resolution": args.resolution_pct,
                                "SearchMode": args.search_mode,
                                "TrafficPattern": args.traffic_pattern})
        elif args.metric == "latency":
            stc.perform("Rfc2544SetupLatencyTestCommand",
                        params={"AcceptableFrameLoss":
                                args.acceptable_frame_loss_pct,
                                "Duration": args.trial_duration_sec,
                                "FrameSizeList": args.frame_size_list,
                                "LearningMode": args.learning_mode,
                                "LatencyType": args.latency_type,
                                "NumOfTrials": args.num_trials,
                                "RateInitial": args.rate_initial_pct,
                                "RateLowerLimit": args.rate_lower_limit_pct,
                                "RateStep": args.rate_step_pct,
                                "RateUpperLimit": args.rate_upper_limit_pct,
                                "Resolution": args.resolution_pct,
                                "SearchMode": args.search_mode,
                                "TrafficPattern": args.traffic_pattern})

        # Save the configuration
        stc.perform("SaveToTcc", params={"Filename": "2544.tcc"})
        # Connect to the hardware...
        stc.perform("AttachPorts", params={"portList": stc.get(
            "system1.project", "children-port"), "autoConnect": "TRUE"})
        # Apply configuration.
        if args.verbose:
            _LOGGER.debug("Apply configuration...")
        stc.apply()

        if args.verbose:
            _LOGGER.debug("Starting the sequencer...")
        stc.perform("SequencerStart")

        # Wait for sequencer to finish
        _LOGGER.info(
            "Starting test... Please wait for the test to complete...")
        stc.wait_until_complete()
        _LOGGER.info("The test has completed... Saving results...")

        # Determine what the results database filename is...
        lab_server_resultsdb = stc.get(
            "system1.project.TestResultSetting", "CurrentResultFileName")

        if args.verbose:
            _LOGGER.debug("The lab server results database is %s",
                          lab_server_resultsdb)

        stc.perform("CSSynchronizeFiles",
                    params={"DefaultDownloadDir": args.results_dir})

        resultsdb = args.results_dir + \
            lab_server_resultsdb.split("/Results")[1]
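        # For example (illustrative), if the lab-server database path ends
        # in "/Results/session1.db", resultsdb above becomes
        # args.results_dir + "/session1.db" after the CSSynchronizeFiles
        # download.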

        if not os.path.exists(resultsdb):
            resultsdb = lab_server_resultsdb
            _LOGGER.info("Failed to create the local summary DB File, using"
                         " the remote DB file instead.")
        else:
            _LOGGER.info(
                "The local summary DB file has been saved to %s", resultsdb)

        # This returns the "RFC2544ThroughputTestResultDetailedSummaryView"
        # table view from the results database.
        # There are other views available.

        if args.metric == "throughput":
            resultsdict = (
                stc.perform("QueryResult",
                            params={
                                "DatabaseConnectionString":
                                resultsdb,
                                "ResultPath":
                                ("RFC2544ThroughputTestResultDetailed"
                                 "SummaryView")}))

        # This returns the "RFC2544BacktoBackTestResultDetailedSummaryView"
        # table view from the results database.
        # There are other views available.
        elif args.metric == "backtoback":
            resultsdict = (
                stc.perform("QueryResult",
                            params={
                                "DatabaseConnectionString":
                                resultsdb,
                                "ResultPath":
                                ("RFC2544Back2BackTestResultDetailed"
                                 "SummaryView")}))

        # This returns the "RFC2544LatencyTestResultDetailedSummaryView"
        # table view from the results database.
        # There are other views available.
        elif args.metric == "latency":
            resultsdict = (
                stc.perform("QueryResult",
                            params={
                                "DatabaseConnectionString":
                                resultsdb,
                                "ResultPath":
                                ("RFC2544LatencyTestResultDetailed"
                                 "SummaryView")}))

        # This returns the "RFC2544FrameLossTestResultDetailedSummaryView"
        # table view from the results database.
        # There are other views available.
        elif args.metric == "frameloss":
            resultsdict = (
                stc.perform("QueryResult",
                            params={
                                "DatabaseConnectionString":
                                resultsdb,
                                "ResultPath":
                                ("RFC2544FrameLossTestResultDetailed"
                                 "SummaryView")}))
        if args.verbose:
            _LOGGER.debug("resultsdict[\"Columns\"]: %s",
                          resultsdict["Columns"])
            _LOGGER.debug("resultsdict[\"Output\"]: %s", resultsdict["Output"])
            _LOGGER.debug("Result paths: %s",
                          stc.perform("GetTestResultSettingPaths"))

            # Write results to csv
            _LOGGER.debug("Writing CSV file to results directory %s",
                          args.results_dir)
        write_query_results_to_csv(
            args.results_dir, args.csv_results_file_prefix, resultsdict)

    except RuntimeError as e:
        _LOGGER.error(e)

    if args.verbose:
        _LOGGER.debug("Destroy session on lab server")
    stc.end_session()

    _LOGGER.info("Test complete!")

if __name__ == "__main__":
    main()