path: root/vstf/vstf/common
author    Yiting.Li <liyiting@huawei.com>    2015-12-22 17:11:12 -0800
committer Yiting.Li <liyiting@huawei.com>    2015-12-22 17:11:12 -0800
commit    8f1101df131a4d3e03b377738507d88b745831c0 (patch)
tree      73f140474fcec2a77c85a453f6946957ca0742d1 /vstf/vstf/common
parent    1a24ebbda3f95600c0e7d5ed8661317a8ff7e265 (diff)
Upload the contribution of vstf as bottleneck network framework.
End to End Performance test
JIRA: BOTTLENECK-29
Change-Id: Ib2c553c8b60d6cda9e7a7b52b737c9139f706ebd
Signed-off-by: Yiting.Li <liyiting@huawei.com>
Diffstat (limited to 'vstf/vstf/common')
-rwxr-xr-x  vstf/vstf/common/__init__.py     14
-rwxr-xr-x  vstf/vstf/common/cfgparser.py    27
-rwxr-xr-x  vstf/vstf/common/check.py        23
-rwxr-xr-x  vstf/vstf/common/cliutil.py      27
-rwxr-xr-x  vstf/vstf/common/cmds.py         19
-rwxr-xr-x  vstf/vstf/common/constants.py    66
-rwxr-xr-x  vstf/vstf/common/daemon.py      145
-rwxr-xr-x  vstf/vstf/common/decorator.py    88
-rwxr-xr-x  vstf/vstf/common/excepts.py      12
-rwxr-xr-x  vstf/vstf/common/input.py        20
-rwxr-xr-x  vstf/vstf/common/log.py          41
-rwxr-xr-x  vstf/vstf/common/message.py     141
-rwxr-xr-x  vstf/vstf/common/perfmark.py     14
-rwxr-xr-x  vstf/vstf/common/pyhtml.py      215
-rwxr-xr-x  vstf/vstf/common/rsync.py       518
-rwxr-xr-x  vstf/vstf/common/saltstack.py   190
-rwxr-xr-x  vstf/vstf/common/ssh.py         230
-rwxr-xr-x  vstf/vstf/common/test_func.py    14
-rwxr-xr-x  vstf/vstf/common/unix.py         53
-rwxr-xr-x  vstf/vstf/common/utils.py       247
-rwxr-xr-x  vstf/vstf/common/vstfcli.py      62
21 files changed, 2166 insertions, 0 deletions
diff --git a/vstf/vstf/common/__init__.py b/vstf/vstf/common/__init__.py
new file mode 100755
index 00000000..89dcd4e2
--- /dev/null
+++ b/vstf/vstf/common/__init__.py
@@ -0,0 +1,14 @@
+# Copyright Huawei Technologies Co., Ltd. 1998-2015.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the License); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an AS IS BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/vstf/vstf/common/cfgparser.py b/vstf/vstf/common/cfgparser.py
new file mode 100755
index 00000000..3d50a1b1
--- /dev/null
+++ b/vstf/vstf/common/cfgparser.py
@@ -0,0 +1,27 @@
+"""
+Created on 2015-8-5
+
+@author: c00225995
+"""
+import os
+from oslo.config import cfg
+
+
+class CfgParser(object):
+ def __init__(self, config_file):
+ super(CfgParser, self).__init__()
+        if not os.path.isfile(config_file):
+            raise Exception('Config file not found: <%s>' % config_file)
+ self.config_file = config_file
+ self.CONF = cfg.ConfigOpts()
+
+ def register_my_opts(self, opts, name=None):
+ if name:
+ self.CONF.register_opts(opts, name)
+ else:
+ self.CONF.register_opts(opts)
+
+ def parse(self):
+ # self.register_my_opts(opts, name=name)
+ self.CONF(args=[], default_config_files=[self.config_file])
+ return self.CONF
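A minimal usage sketch for the CfgParser wrapper above (not part of the commit). The option names, the 'rabbit' group and the config-file path are illustrative assumptions; cfg.StrOpt, cfg.IntOpt and register_opts are standard oslo.config APIs.

    # Illustrative sketch only; option names and paths are made up.
    from oslo.config import cfg
    from vstf.common.cfgparser import CfgParser

    server_opts = [
        cfg.StrOpt('host', default='127.0.0.1', help='listen address'),
        cfg.IntOpt('port', default=5672, help='listen port'),
    ]

    parser = CfgParser('/etc/vstf/amqp.ini')          # hypothetical config file
    parser.register_my_opts(server_opts, name='rabbit')
    conf = parser.parse()
    print conf.rabbit.host, conf.rabbit.port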
diff --git a/vstf/vstf/common/check.py b/vstf/vstf/common/check.py
new file mode 100755
index 00000000..9b2f3d04
--- /dev/null
+++ b/vstf/vstf/common/check.py
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author: wly
+# date: 2015-11-5
+# see license for license details
+
+import logging
+import vstf.common.constants as cst
+
+LOG = logging.getLogger(__name__)
+
+
+def check_case_params(protocol, typ, tool):
+    if "throughput" == typ:
+        return False, "'throughput' is not supported in this version"
+    if "tcp" == protocol:
+        if tool in ["pktgen", "netmap"]:
+            return False, "%s cannot run tcp tests" % tool
+    if "qperf" == tool and "latency" != typ:
+        return False, "qperf only supports latency tests, cannot run %s" % typ
+    if "latency" == typ and tool not in ["netperf", "qperf"]:
+        return False, "%s cannot run latency tests" % tool
+    return True, "supported"
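A quick sketch of how the validator above is driven (not part of the commit); the tool, test-type and protocol values are just examples taken from constants.py.

    from vstf.common.check import check_case_params

    print check_case_params("udp", "latency", "netperf")    # (True, 'supported')
    print check_case_params("tcp", "frameloss", "pktgen")   # (False, 'pktgen cannot run tcp tests')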
diff --git a/vstf/vstf/common/cliutil.py b/vstf/vstf/common/cliutil.py
new file mode 100755
index 00000000..91ff7f18
--- /dev/null
+++ b/vstf/vstf/common/cliutil.py
@@ -0,0 +1,27 @@
+def arg(*args, **kwargs):
+ """Decorator for CLI args.
+
+ Example:
+
+ >>> @arg("name", help="Name of the new entity")
+ ... def entity_create(args):
+ ... pass
+ """
+ def _decorator(func):
+ add_arg(func, *args, **kwargs)
+ return func
+ return _decorator
+
+
+def add_arg(func, *args, **kwargs):
+ """Bind CLI arguments to a shell.py `do_foo` function."""
+
+ if not hasattr(func, 'arguments'):
+ func.arguments = []
+
+ # NOTE(sirp): avoid dups that can occur when the module is shared across
+ # tests.
+ if (args, kwargs) not in func.arguments:
+ # Because of the semantics of decorator composition if we just append
+ # to the options list positional options will appear to be backwards.
+        func.arguments.insert(0, (args, kwargs))
\ No newline at end of file
diff --git a/vstf/vstf/common/cmds.py b/vstf/vstf/common/cmds.py
new file mode 100755
index 00000000..c30f2be6
--- /dev/null
+++ b/vstf/vstf/common/cmds.py
@@ -0,0 +1,19 @@
+import commands
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+def execute(cmd=None, care_result=True):
+ if not cmd:
+ LOG.error('The cmd is None')
+ return None
+ try:
+ (status, ret) = commands.getstatusoutput(cmd)
+ if care_result and 0 != status:
+ LOG.error('CMD<%(cmd)s> \nSTDOUT:\n%(ret)s.', {'cmd':cmd, 'ret':ret})
+ return None
+ else:
+ return ret
+ except Exception as e:
+ raise e
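execute() above returns the command output on success and None on failure when care_result is True; a two-line sketch, not part of the commit:

    from vstf.common import cmds

    print cmds.execute("uname -r")          # kernel release string
    print cmds.execute("ls /nonexistent")   # logs an error and returns None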
diff --git a/vstf/vstf/common/constants.py b/vstf/vstf/common/constants.py
new file mode 100755
index 00000000..1cace390
--- /dev/null
+++ b/vstf/vstf/common/constants.py
@@ -0,0 +1,66 @@
+slave_project_path = "/opt/esp-atf"
+VSTFCPATH = "/opt/vstf"
+sockaddr = VSTFCPATH + "/vstf.socket"
+vstf_pid = VSTFCPATH + "/vstf-server.pid"
+buff_size = 1024
+
+# the message's len must be < 9999999999
+MSG_FLAG_LEN = 10
+MSG_FLAG = "%010d"
+
+# timeout (seconds) applied to all command runs
+TIMEOUT = 20
+# timer tick interval, in seconds
+TICK = 3
+
+HW_INFO = "HW_INFO"
+CPU_INFO = "CPU INFO"
+MEMORY_INFO = "MEMORY INFO"
+OS_INFO = "OS INFO"
+
+TOOLS = ["pktgen", "netperf", "qperf", "netmap"]
+OPERATIONS = ["start", "stop", "restart"]
+ACTIONS = ["send", "receive"]
+PROTOCOLS = ["tcp_lat", "udp_lat", "tcp_bw", "udp_bw"]
+TPROTOCOLS = ["tcp", "udp"]
+PROFILES = ["rdp", "fastlink", "l2switch"]
+TTYPES = ["throughput", "latency", "frameloss"]
+SCENARIOS = ["Ti", "Tn", "Tnv", "Tu"]
+SOCKET_BUF = 102400
+WAIT_BALANCE = 2
+CPU_USAGE_ROUND = 2
+PKTLOSS_ROUND = 2
+RATEP_ROUND = 3
+TIME_ROUND = 3
+TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
+TIME_STR = "%Y%m%d_%H%M%S"
+REPORT_DEFAULTS = "/tmp"
+
+CASE_ACTOR_MAP = {
+ # unidirection
+ "Tn-1": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Tn-2": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+ # unidirection with vxlan
+ "Tn-3": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Tn-4": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+ # unidirection
+ "Tnv-1": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Tnv-2": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+ # unidirection with vxlan
+ "Tnv-3": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Tnv-4": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+
+ "Ti-1": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Ti-2": {"senders": [-1], "receivers": [0], "flows": 1},
+ "Ti-3": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+ "Ti-4": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Ti-5": {"senders": [-1], "receivers": [0], "flows": 1},
+ "Ti-6": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+
+ "Tu-1": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Tu-2": {"senders": [-1], "receivers": [0], "flows": 1},
+ "Tu-3": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2},
+ "Tu-4": {"senders": [0], "receivers": [-1], "flows": 1},
+ "Tu-5": {"senders": [-1], "receivers": [0], "flows": 1},
+ "Tu-6": {"senders": [0, -1], "receivers": [-1, 0], "flows": 2}
+}
diff --git a/vstf/vstf/common/daemon.py b/vstf/vstf/common/daemon.py
new file mode 100755
index 00000000..8a298614
--- /dev/null
+++ b/vstf/vstf/common/daemon.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+import sys, os, time, atexit
+import logging
+from signal import SIGTERM
+
+LOG = logging.getLogger(__name__)
+
+
+class Daemon(object):
+ """
+ A generic daemon class.
+
+ Usage: subclass the Daemon class and override the run() method
+ """
+ def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
+ super(Daemon, self).__init__()
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+ self.pidfile = pidfile
+
+ def daemonize(self):
+ """
+ do the UNIX double-fork magic, see Stevens' "Advanced
+ Programming in the UNIX Environment" for details (ISBN 0201563177)
+ http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
+ """
+ try:
+ pid = os.fork()
+ if pid > 0:
+ sys.exit(0)
+ except OSError, e:
+ LOG.error("fork #1 failed: %(errno)s, %(strerror)s",
+ {'errno':e.errno, 'strerror': e.strerror})
+ sys.exit(1)
+
+ # decouple from parent environment
+ os.chdir("/")
+ os.setsid()
+ os.umask(0)
+
+ # do second fork
+ try:
+ pid = os.fork()
+ if pid > 0:
+ # exit from second parent
+ sys.exit(0)
+ except OSError, e:
+            LOG.error("fork #2 failed: %(errno)s, %(strerror)s",
+ {'errno':e.errno, 'strerror': e.strerror})
+ sys.exit(1)
+
+ # redirect standard file descriptors
+ sys.stdout.flush()
+ sys.stderr.flush()
+ si = file(self.stdin, 'r')
+ so = file(self.stdout, 'a+')
+ se = file(self.stderr, 'a+', 0)
+ os.dup2(si.fileno(), sys.stdin.fileno())
+ os.dup2(so.fileno(), sys.stdout.fileno())
+ os.dup2(se.fileno(), sys.stderr.fileno())
+
+ # write pidfile
+ atexit.register(self.delpid)
+ pid = str(os.getpid())
+ file(self.pidfile,'w+').write("%s\n" % pid)
+
+ def delpid(self):
+ os.remove(self.pidfile)
+
+ def start(self):
+ """
+ Start the daemon
+ """
+
+ # Check for a pidfile to see if the daemon already runs
+ try:
+ pf = file(self.pidfile,'r')
+ pid = int(pf.read().strip())
+ pf.close()
+ except IOError:
+ pid = None
+
+ if pid:
+            message = "pidfile %s already exists. Daemon already running?\n"
+ sys.stderr.write(message % self.pidfile)
+ sys.exit(1)
+ LOG.info("daemon start to run daemonize")
+ # Start the daemon
+ self.daemonize()
+ self.run()
+
+ def stop(self):
+ """
+ Stop the daemon
+ """
+ # Get the pid from the pidfile
+ try:
+ pf = file(self.pidfile,'r')
+ pid = int(pf.read().strip())
+ pf.close()
+ except IOError:
+ pid = None
+
+ if not pid:
+ message = "pidfile %s does not exist. Daemon not running?\n"
+ sys.stderr.write(message % self.pidfile)
+ return # not an error in a restart
+
+ # Try killing the daemon process
+ try:
+ while 1:
+ os.kill(pid, SIGTERM)
+ time.sleep(0.1)
+ except OSError, err:
+ err = str(err)
+ if err.find("No such process") > 0:
+ if os.path.exists(self.pidfile):
+ os.remove(self.pidfile)
+ else:
+ print str(err)
+ sys.exit(1)
+
+ def restart(self):
+ """
+ Restart the daemon
+ """
+ self.stop()
+ self.start()
+
+ def run(self):
+ """
+ You should override this method when you subclass Daemon.
+ It will be called after the process has been
+ daemonized by start() or restart().
+
+ """
+ pass
+
+    def daemon_die(self):
+        """Override this method when you shut down the daemon.
+        It will be called by stop() before the process is killed.
+
+        """
+        pass
\ No newline at end of file
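The class docstring above says to subclass Daemon and override run(); a minimal sketch (not part of the commit), where the pid-file path and the idle loop are illustrative only:

    import sys
    import time
    from vstf.common.daemon import Daemon


    class HeartbeatDaemon(Daemon):
        def run(self):
            while True:
                time.sleep(5)      # a real daemon would do its work here


    if __name__ == '__main__':
        d = HeartbeatDaemon('/tmp/heartbeat.pid')    # hypothetical pid file
        if len(sys.argv) > 1 and sys.argv[1] == 'stop':
            d.stop()
        else:
            d.start()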
diff --git a/vstf/vstf/common/decorator.py b/vstf/vstf/common/decorator.py
new file mode 100755
index 00000000..cafbb3ff
--- /dev/null
+++ b/vstf/vstf/common/decorator.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author: wly
+# date: 2015-09-09
+# see license for license details
+_DEFAULTS = "vstf check key defaults".encode()
+
+
+def check(key, choices=[], defaults=_DEFAULTS):
+ def _deco(func):
+ def __deco(*args, **kwargs):
+ if key not in kwargs:
+ if defaults != _DEFAULTS:
+ kwargs[key] = defaults
+ else:
+ raise Exception("Error: '%s' is needed in %s" % (key, func))
+
+ if choices and kwargs[key] not in choices:
+ raise Exception("Error: %s :%s" % (key, kwargs[key]))
+ ret = func(*args, **kwargs)
+ return ret
+
+ return __deco
+
+ return _deco
+
+
+def dcheck(key, choices=[]):
+ def _deco(func):
+ def __deco(*args):
+ if len(args) == 2:
+ values = args[1]
+ elif len(args) == 1:
+ values = args[0]
+ else:
+ values = None
+ if isinstance(values, dict):
+ if key not in values:
+ raise Exception("Error: '%s' is needed in %s" % (key, func))
+ if choices and values[key] not in choices:
+ raise Exception("Error: %s :%s" % (key, values[key]))
+ ret = func(*args)
+ return ret
+
+ return __deco
+
+ return _deco
+
+
+def vstf_input(key, types=str, choices=[], default=None):
+ def _deco(func):
+ def __deco(*args):
+ ret = func(*args)
+ if not isinstance(ret, dict):
+ ret = {}
+ in_str = "----> %s : " % key
+ if choices:
+ in_str = "---- %s\n" % (str(choices)) + in_str
+ while True:
+ if types == list or types == dict:
+ value = input(in_str)
+ else:
+ value = raw_input(in_str)
+ value = types(value)
+ if not value:
+ value = default
+ if not choices or value in choices:
+ break
+ ret.update({key: value})
+ return ret
+
+ return __deco
+
+ return _deco
+
+
+def namespace():
+ def _deco(func):
+ def __deco(*args, **kwargs):
+ ret = func(*args, **kwargs)
+ nspace = kwargs.get("namespace", None)
+ if nspace:
+ ret = "ip netns exec %(namespace)s " % {"namespace": nspace} + ret
+ return ret
+
+ return __deco
+
+ return _deco
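A short sketch of how check() and dcheck() above are meant to be applied (not part of the commit); the decorated functions and key names are made up.

    from vstf.common.decorator import check, dcheck


    @check('tool', choices=['netperf', 'qperf'], defaults='netperf')
    @check('protocol', choices=['tcp', 'udp'])
    def run_case(**kwargs):
        return "%(tool)s over %(protocol)s" % kwargs


    @dcheck('iface')
    def parse_device(info):
        return info['iface']


    print run_case(protocol='udp')            # 'tool' falls back to 'netperf'
    print parse_device({'iface': 'eth0'})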
diff --git a/vstf/vstf/common/excepts.py b/vstf/vstf/common/excepts.py
new file mode 100755
index 00000000..dc781b9e
--- /dev/null
+++ b/vstf/vstf/common/excepts.py
@@ -0,0 +1,12 @@
+class ChannelDie(Exception):
+ """rabbitmq's channel connect failed"""
+ pass
+
+
+class UnsolvableExit(Exception):
+    """The software may be in an error state that the code cannot resolve; the process must exit"""
+ pass
+
+
+class AgentExit(Exception):
+ pass
diff --git a/vstf/vstf/common/input.py b/vstf/vstf/common/input.py
new file mode 100755
index 00000000..9e2f9333
--- /dev/null
+++ b/vstf/vstf/common/input.py
@@ -0,0 +1,20 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# date: 2015-09-09
+# see license for license details
+
+__author__ = 'wly'
+__version__ = '0.1'
+
+import os
+
+
+def raw_choice(title):
+ ret = {'Y': True, 'N': False}
+ while True:
+ os.system("clear")
+ in_str = "\n%s:(y|n) " % title
+ uin = raw_input(in_str).title()
+ if uin in ['Y', 'N']:
+ break
+ return ret[uin]
diff --git a/vstf/vstf/common/log.py b/vstf/vstf/common/log.py
new file mode 100755
index 00000000..b34a8a92
--- /dev/null
+++ b/vstf/vstf/common/log.py
@@ -0,0 +1,41 @@
+import logging
+
+
+def _init_log_to_file(log_file, level, _format):
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(level)
+ file_handler.setFormatter(logging.Formatter(_format))
+ return file_handler
+
+
+def _init_log_to_console(level, _format):
+ console = logging.StreamHandler()
+ console.setLevel(level)
+ console.setFormatter(logging.Formatter(_format))
+ return console
+
+
+def _init_log(log_file, level=logging.INFO, clevel=logging.INFO):
+ _format = '%(asctime)s <%(levelname)s> [%(funcName)s.%(lineno)d]: %(message)s'
+ # _format = '%(asctime)s [%(levelname)s] %(message)s'
+ _verbose = '%(levelname)s %(asctime)s [%(filename)s:%(lineno)d] %(funcName)s ### %(message)s'
+ _simple = '<%(levelname)s> [%(filename)s:%(lineno)d] ### %(message)s'
+ file_handler = _init_log_to_file(log_file, level, _verbose)
+ console = _init_log_to_console(clevel, _simple)
+ return file_handler, console
+
+
+def setup_logging(level=logging.INFO, log_file="/var/log/esp_test.log", clevel=logging.WARNING):
+ log = logging.getLogger()
+ log.setLevel(level)
+ file_handler, console = _init_log(log_file, level, clevel)
+ log.addHandler(file_handler)
+ log.addHandler(console)
+
+
+if __name__ == "__main__":
+ setup_logging()
+ logger = logging.getLogger("common")
+ logger.info('this is a test.')
+ logger.warning('this is a test.')
+ logger.error('this is a test.')
diff --git a/vstf/vstf/common/message.py b/vstf/vstf/common/message.py
new file mode 100755
index 00000000..926091fb
--- /dev/null
+++ b/vstf/vstf/common/message.py
@@ -0,0 +1,141 @@
+import json
+import uuid
+import logging
+import traceback
+from vstf.common import constants
+
+LOG = logging.getLogger(__name__)
+
+
+def json_defaults(obj):
+ if isinstance(obj, set):
+ return list(obj)
+    return "unknown obj"
+
+
+def encode(msg):
+ """obj to string"""
+ if isinstance(msg, str):
+ return msg
+ else:
+ return json.dumps(msg, default=json_defaults)
+
+
+def decode(msg):
+ """string to obj"""
+ if isinstance(msg, str):
+ return json.loads(msg)
+ else:
+ return msg
+
+
+def gen_corrid():
+ return str(uuid.uuid4())
+
+
+def add_context(msg, **kwargs):
+ return {'head': kwargs, 'body': msg}
+
+
+def get_context(msg):
+ if "head" in msg.iterkeys():
+ return msg['head']
+ else:
+ return ""
+
+
+def get_body(msg):
+ if "body" in msg.iterkeys():
+ return msg['body']
+ else:
+ return None
+
+
+def get_corrid(context):
+ """
+    :return: string of corrid or empty string
+ """
+ if "corrid" in context.iterkeys():
+ return context['corrid']
+ else:
+ return ""
+
+
+def send(func, data):
+ # the message must be a string
+ if not isinstance(data, str):
+ raise ValueError("the data must be a string")
+
+ # the message's len must > 0
+ msg_len = len(data)
+ if msg_len <= 0:
+ return True
+
+    # the message's length must fit in MSG_FLAG_LEN digits
+ if len(str(msg_len)) > constants.MSG_FLAG_LEN:
+ raise ValueError("the data's len too long")
+
+ data = (constants.MSG_FLAG % (msg_len)) + data
+ total_send = msg_len + constants.MSG_FLAG_LEN
+
+ count = 0
+ while count < total_send:
+ sent = func(data[count:])
+ if 0 == sent:
+ raise RuntimeError("socket connection broken")
+ count += sent
+
+ return msg_len
+
+
+def sendto(func, data, addr):
+ # the message must be a string
+ if not isinstance(data, str):
+ raise ValueError("the data must be a string")
+
+ # the message's len must > 0
+ msg_len = len(data)
+ if msg_len <= 0:
+ return True
+
+    # the message's length must fit in MSG_FLAG_LEN digits
+ if len(str(msg_len)) > constants.MSG_FLAG_LEN:
+ raise ValueError("the data's len too long")
+
+ data = (constants.MSG_FLAG % (msg_len)) + data
+ total_send = msg_len + constants.MSG_FLAG_LEN
+
+ count = 0
+ while count < total_send:
+ sent = func(data[count:], addr)
+ if 0 == sent:
+ raise RuntimeError("socket connection broken")
+ count += sent
+
+ return msg_len
+
+
+def recv(func):
+ head = func(constants.MSG_FLAG_LEN)
+    # a peer close (FIN) is read as '' in python
+ if head == '':
+ raise RuntimeError("socket connection broken")
+
+ if not head.isdigit():
+ raise ValueError("the msg head is not a num.")
+
+ msg_len = int(head)
+ chunks = []
+ count = 0
+ while count < msg_len:
+ chunk = func(min(msg_len - count, constants.buff_size))
+ if chunk == '':
+ raise RuntimeError("socket connection broken")
+ chunks.append(chunk)
+ count += len(chunk)
+
+ return ''.join(chunks)
+
+
+def dumpstrace():
+ return traceback.format_exc()
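send()/recv() above frame each payload with a MSG_FLAG_LEN-digit length header. A self-contained round-trip over a local socketpair, not part of the commit:

    import socket
    from vstf.common import message

    left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)

    payload = message.encode(message.add_context("hello", corrid=message.gen_corrid()))
    message.send(left.send, payload)          # writes '%010d' length header + body

    msg = message.decode(message.recv(right.recv))
    print message.get_corrid(message.get_context(msg)), message.get_body(msg)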
diff --git a/vstf/vstf/common/perfmark.py b/vstf/vstf/common/perfmark.py
new file mode 100755
index 00000000..f03ee532
--- /dev/null
+++ b/vstf/vstf/common/perfmark.py
@@ -0,0 +1,14 @@
+pktSize = 'AvgFrameSize'
+offLoad = 'OfferedLoad'
+percentLoss = 'PercentLoss'
+bandwidth = 'Bandwidth'
+minLatency = 'MinimumLatency'
+maxLatency = 'MaximumLatency'
+avgLatency = 'AverageLatency'
+txCount = 'TxFrameCount'
+rxCount = 'RxFrameCount'
+duration = 'Duration'
+cpu = 'CPU'
+mppsGhz = 'MppspGhz'
+rxMbps = "RxMbit"
+txMbps = 'TxMbit'
diff --git a/vstf/vstf/common/pyhtml.py b/vstf/vstf/common/pyhtml.py
new file mode 100755
index 00000000..c2a26208
--- /dev/null
+++ b/vstf/vstf/common/pyhtml.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+# author: wly
+# date: 2015-09-25
+# see license for license details
+
+from sys import stdout, modules
+
+doc_type = '<!DOCTYPE HTML>\n'
+default_title = "Html Page"
+charset = '<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />\n'
+
+html4_tags = {'a', 'abbr', 'acronym', 'address', 'area', 'b', 'base', 'bdo', 'big',
+ 'blockquote', 'body', 'br', 'button', 'caption', 'cite', 'code', 'col',
+ 'colgroup', 'dd', 'del', 'div', 'dfn', 'dl', 'dt', 'em', 'fieldset',
+ 'form', 'frame', 'frameset', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'head',
+ 'hr', 'html', 'i', 'iframe', 'img', 'input', 'ins', 'kbd',
+ 'label', 'legend', 'li', 'link', 'map', 'menu', 'menuitem', 'meta',
+ 'noframes', 'noscript', 'object', 'ol', 'optgroup', 'option', 'p',
+ 'param', 'pre', 'q', 'samp', 'script', 'select', 'small', 'span', 'strong',
+ 'style', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
+ 'thead', 'title', 'tr', 'tt', 'ul', 'var'}
+disused_tags = {'isindex', 'font', 'dir', 's', 'strike',
+ 'u', 'center', 'basefont', 'applet', 'xmp'}
+html5_tags = {'article', 'aside', 'audio', 'bdi', 'canvas', 'command', 'datalist', 'details',
+ 'dialog', 'embed', 'figcaption', 'figure', 'footer', 'header',
+ 'keygen', 'mark', 'meter', 'nav', 'output', 'progress', 'rp', 'rt', 'ruby',
+ 'section', 'source', 'summary', 'details', 'time', 'track', 'video', 'wbr'}
+
+nl = '\n'
+tags = html4_tags | disused_tags | html5_tags
+
+__all__ = [x.title() for x in tags] + ['PyHtml']
+
+self_close = {'input', 'img', 'link', 'br'}
+
+
+class Tag(list):
+ tag_name = ''
+
+ def __init__(self, *args, **kwargs):
+ self.attributes = kwargs
+ if self.tag_name:
+ name = self.tag_name
+ self.is_seq = False
+ else:
+ name = 'sequence'
+ self.is_seq = True
+ self._id = kwargs.get('id', name)
+ for arg in args:
+ self.add_obj(arg)
+
+ def __iadd__(self, obj):
+ if isinstance(obj, Tag) and obj.is_seq:
+ for o in obj:
+ self.add_obj(o)
+ else:
+ self.add_obj(obj)
+ return self
+
+ def add_obj(self, obj):
+ if not isinstance(obj, Tag):
+ obj = str(obj)
+ _id = self.set_id(obj)
+ setattr(self, _id, obj)
+ self.append(obj)
+
+ def set_id(self, obj):
+ if isinstance(obj, Tag):
+ _id = obj._id
+ obj_lst = filter(lambda t: isinstance(
+ t, Tag) and t._id.startswith(_id), self)
+ else:
+ _id = 'content'
+ obj_lst = filter(lambda t: not isinstance(t, Tag), self)
+ length = len(obj_lst)
+ if obj_lst:
+ _id = '%s_%03i' % (_id, length)
+ if isinstance(obj, Tag):
+ obj._id = _id
+ return _id
+
+ def __add__(self, obj):
+ if self.tag_name:
+ return Tag(self, obj)
+ self.add_obj(obj)
+ return self
+
+ def __lshift__(self, obj):
+ if isinstance(obj, Tag):
+ self += obj
+ return obj
+ print "unknown obj: %s " % obj
+ return self
+
+ def render(self):
+ result = ''
+ if self.tag_name:
+ result += '<%s%s%s>' % (self.tag_name,
+ self._render_attr(), self._self_close() * ' /')
+ if not self._self_close():
+ isnl = True
+ for c in self:
+ if isinstance(c, Tag):
+ result += isnl * nl
+ isnl = False
+ result += c.render()
+ else:
+ result += c
+ if self.tag_name:
+ result += '</%s>' % self.tag_name
+ result += nl
+ return result
+
+ def _render_attr(self):
+ result = ''
+ for key, value in self.attributes.iteritems():
+ if key != 'txt' and key != 'open':
+ if key == 'cl':
+ key = 'class'
+ result += ' %s="%s"' % (key, value)
+ return result
+
+ def _self_close(self):
+ return self.tag_name in self_close
+
+"""
+def tag_factory(tag):
+ class F(Tag):
+ tag_name = tag
+
+ F.__name__ = tag.title()
+ return F
+
+
+THIS = modules[__name__]
+
+for t in tags:
+ setattr(THIS, t.title(), tag_factory(t))
+"""
+THIS = modules[__name__]
+for t in tags:
+ obj = type(t.title(), (Tag, ), {'tag_name': t})
+ setattr(THIS, t.title(), obj)
+
+
+def _render_style(style):
+ result = ''
+ for item in style:
+ result += item
+ result += '\n{\n'
+ values = style[item]
+ for key, value in values.iteritems():
+ result += " %s: %s;\n" % (key, value)
+ result += '}\n'
+ if result:
+ result = '\n' + result
+ return result
+
+
+class PyHtml(Tag):
+ tag_name = 'html'
+
+ def __init__(self, title=default_title):
+ self._id = 'html'
+ self += Head()
+ self += Body()
+ self.attributes = dict(xmlns='http://www.w3.org/1999/xhtml', lang='en')
+ self.head += Title(title)
+
+ def __iadd__(self, obj):
+ if isinstance(obj, Head) or isinstance(obj, Body):
+ self.add_obj(obj)
+ elif isinstance(obj, Meta) or isinstance(obj, Link):
+ self.head += obj
+ else:
+ self.body += obj
+ _id = self.set_id(obj)
+ setattr(self, _id, obj)
+ return self
+
+ def add_js(self, *arg):
+ for f in arg:
+ self.head += Script(type='text/javascript', src=f)
+
+ def add_css(self, *arg):
+ for f in arg:
+ self.head += Link(rel='stylesheet', type='text/css', href=f)
+
+ def output(self, name=''):
+ if name:
+ fil = open(name, 'w')
+ else:
+ fil = stdout
+ fil.write(self.as_string())
+ fil.flush()
+ if name:
+ fil.close()
+
+ def as_string(self):
+ return doc_type + self.render()
+
+ def add_style(self, style):
+ self.head += Style(_render_style(style))
+
+ def add_table(self, data):
+ table = self << Table()
+ rows = len(data)
+ cols = len(zip(*data))
+
+ for i in range(rows):
+ tr = table << Tr()
+ tr << Th(data[i][0])
+ for j in range(1, cols):
+ tr << Td(data[i][j])
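A small sketch of building a report page with the helper above (not part of the commit); the output path and table contents are made up.

    from vstf.common.pyhtml import PyHtml, H2

    page = PyHtml('vstf report')
    page.add_style({'table': {'border': '1px solid #ccc'}})
    page += H2('Throughput')
    page.add_table([['FrameSize', '64', '128'],
                    ['Mpps', '1.48', '0.84']])
    page.output('/tmp/report.html')     # omit the name to write to stdout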
diff --git a/vstf/vstf/common/rsync.py b/vstf/vstf/common/rsync.py
new file mode 100755
index 00000000..b566136f
--- /dev/null
+++ b/vstf/vstf/common/rsync.py
@@ -0,0 +1,518 @@
+#!/usr/bin/python
+
+# Python counterpart of rsync written by Vivian De Smedt
+# Send any comment or bug report to vivian@vdesmedt.com.
+# I would like to thank William Tan for his support in tuning rsync.py to support unicode paths.
+# I would like to thank Luc Saffre for his bug reports and fixes.
+
+#from __future__ import nested_scopes
+
+import os, os.path, shutil, glob, re, sys, getopt, stat, string
+
+
+try:
+ import win32file
+except:
+ win32file = None
+
+class Cookie:
+ def __init__(self):
+ self.sink_root = ""
+ self.target_root = ""
+ self.quiet = 0
+ self.recursive = 0
+ self.relative = 0
+ self.dry_run = 0
+ self.time = 0
+ self.update = 0
+ self.cvs_ignore = 0
+ self.ignore_time = 0
+ self.delete = 0
+ self.delete_excluded = 0
+ self.delete_from_source = 0
+ self.size_only = 0
+ self.modify_window = 2
+ self.existing = 0
+ self.filters = []
+ self.case_sensitivity = 0
+ if os.name == "nt":
+ self.case_sensitivity = re.I
+
+def visit(cookie, dirname, names):
+ """Copy files names from sink_root + (dirname - sink_root) to target_root + (dirname - sink_root)"""
+ if os.path.split(cookie.sink_root)[1]: # Should be tested with (C:\Cvs -> C:\)! (C:\Archives\MyDatas\UltraEdit -> C:\Archives\MyDatas) (Cvs -> "")! (Archives\MyDatas\UltraEdit -> Archives\MyDatas) (\Cvs -> \)! (\Archives\MyDatas\UltraEdit -> Archives\MyDatas)
+ dirname = dirname[len(cookie.sink_root) + 1:]
+ else:
+ dirname = dirname[len(cookie.sink_root):]
+ target_dir = os.path.join(cookie.target_root, dirname)
+ if not os.path.isdir(target_dir):
+ makeDir(cookie, target_dir)
+ sink_dir = os.path.join(cookie.sink_root, dirname)
+
+ filters = []
+ if cookie.cvs_ignore:
+ ignore = os.path.join(sink_dir, ".cvsignore")
+ if os.path.isfile(ignore):
+ filters = convertPatterns(ignore, "-")
+ filters = filters + cookie.filters
+
+ names_excluded = []
+ if filters:
+ # filter sink files (names):
+ name_index = 0
+ while name_index < len(names):
+ name = names[name_index]
+ path = os.path.join(dirname, name)
+ path = convertPath(path)
+ if os.path.isdir(os.path.join(sink_dir, name)):
+ path = path + "/"
+ for filter in filters:
+ if re.search(filter[1], path, cookie.case_sensitivity):
+ if filter[0] == '-':
+ sink = os.path.join(sink_dir, name)
+ if cookie.delete_from_source:
+ if os.path.isfile(sink):
+ removeFile(cookie, sink)
+ elif os.path.isdir(sink):
+ removeDir(cookie, sink)
+ else:
+ logError("Sink %s is neither a file nor a folder (skip removal)" % sink)
+ names_excluded += [names[name_index]]
+ del(names[name_index])
+ name_index = name_index - 1
+ break
+ elif filter[0] == '+':
+ break
+ name_index = name_index + 1
+
+ if cookie.delete and os.path.isdir(target_dir):
+ # Delete files and folder in target not present in filtered sink.
+ for name in os.listdir(target_dir):
+ if not cookie.delete_excluded and name in names_excluded:
+ continue
+ if not name in names:
+ target = os.path.join(target_dir, name)
+ if os.path.isfile(target):
+ removeFile(cookie, target)
+ elif os.path.isdir(target):
+ removeDir(cookie, target)
+ else:
+ pass
+
+ for name in names:
+ # Copy files and folder from sink to target.
+ sink = os.path.join(sink_dir, name)
+ #print sink
+ target = os.path.join(target_dir, name)
+ if os.path.exists(target):
+            # When the target already exists:
+ if os.path.isfile(sink):
+ if os.path.isfile(target):
+ # file-file
+ if shouldUpdate(cookie, sink, target):
+ updateFile(cookie, sink, target)
+ elif os.path.isdir(target):
+ # file-folder
+ removeDir(cookie, target)
+ copyFile(cookie, sink, target)
+ else:
+ # file-???
+ logError("Target %s is neither a file nor folder (skip update)" % sink)
+
+ elif os.path.isdir(sink):
+ if os.path.isfile(target):
+ # folder-file
+ removeFile(cookie, target)
+ makeDir(cookie, target)
+ else:
+ # ???-xxx
+ logError("Sink %s is neither a file nor a folder (skip update)" % sink)
+
+ elif not cookie.existing:
+            # When the target does not exist:
+ if os.path.isfile(sink):
+ # file
+ copyFile(cookie, sink, target)
+ elif os.path.isdir(sink):
+ # folder
+ makeDir(cookie, target)
+ else:
+ logError("Sink %s is neither a file nor a folder (skip update)" % sink)
+
+
+def log(cookie, message):
+ if not cookie.quiet:
+ try:
+ print message
+ except UnicodeEncodeError:
+ print message.encode("utf8")
+
+
+def logError(message):
+ try:
+ sys.stderr.write(message + "\n")
+ except UnicodeEncodeError:
+ sys.stderr.write(message.encode("utf8") + "\n")
+
+
+def shouldUpdate(cookie, sink, target):
+ try:
+ sink_st = os.stat(sink)
+ sink_sz = sink_st.st_size
+ sink_mt = sink_st.st_mtime
+ except:
+ logError("Fail to retrieve information about sink %s (skip update)" % sink)
+ return 0
+
+ try:
+ target_st = os.stat(target)
+ target_sz = target_st.st_size
+ target_mt = target_st.st_mtime
+ except:
+ logError("Fail to retrieve information about target %s (skip update)" % target)
+ return 0
+
+ if cookie.update:
+ return target_mt < sink_mt - cookie.modify_window
+
+ if cookie.ignore_time:
+ return 1
+
+ if target_sz != sink_sz:
+ return 1
+
+ if cookie.size_only:
+ return 0
+
+ return abs(target_mt - sink_mt) > cookie.modify_window
+
+
+def copyFile(cookie, sink, target):
+ log(cookie, "copy: %s to: %s" % (sink, target))
+ if not cookie.dry_run:
+ try:
+ shutil.copyfile(sink, target)
+ except:
+ logError("Fail to copy %s" % sink)
+
+ if cookie.time:
+ try:
+ s = os.stat(sink)
+ os.utime(target, (s.st_atime, s.st_mtime));
+ except:
+ logError("Fail to copy timestamp of %s" % sink)
+
+
+def updateFile(cookie, sink, target):
+ log(cookie, "update: %s to: %s" % (sink, target))
+ if not cookie.dry_run:
+ # Read only and hidden and system files can not be overridden.
+ try:
+ try:
+ if win32file:
+ filemode = win32file.GetFileAttributesW(target)
+ win32file.SetFileAttributesW(target, filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
+ else:
+ os.chmod(target, stat.S_IWUSR)
+ except:
+ #logError("Fail to allow override of %s" % target)
+ pass
+
+ shutil.copyfile(sink, target)
+ if cookie.time:
+ try:
+ s = os.stat(sink)
+ os.utime(target, (s.st_atime, s.st_mtime));
+ except:
+ logError("Fail to copy timestamp of %s" % sink) # The utime api of the 2.3 version of python is not unicode compliant.
+ except:
+ logError("Fail to override %s" % sink)
+
+ if win32file:
+ win32file.SetFileAttributesW(target, filemode)
+
+
+def prepareRemoveFile(path):
+ if win32file:
+ filemode = win32file.GetFileAttributesW(path)
+ win32file.SetFileAttributesW(path, filemode & ~win32file.FILE_ATTRIBUTE_READONLY & ~win32file.FILE_ATTRIBUTE_HIDDEN & ~win32file.FILE_ATTRIBUTE_SYSTEM)
+ else:
+ os.chmod(path, stat.S_IWUSR)
+
+
+def removeFile(cookie, target):
+ # Read only files could not be deleted.
+ log(cookie, "remove: %s" % target)
+ if not cookie.dry_run:
+ try:
+ try:
+ prepareRemoveFile(target)
+ except:
+ #logError("Fail to allow removal of %s" % target)
+ pass
+
+ os.remove(target)
+ except:
+ logError("Fail to remove %s" % target)
+
+
+
+def makeDir(cookie, target):
+ log(cookie, "make dir: %s" % target)
+ if not cookie.dry_run:
+ try:
+ os.makedirs(target)
+ except:
+ logError("Fail to make dir %s" % target)
+
+
+def visitForPrepareRemoveDir(arg, dirname, names):
+ for name in names:
+ path = os.path.join(dirname, name)
+ prepareRemoveFile(path)
+
+
+def prepareRemoveDir(path):
+ prepareRemoveFile(path)
+ os.path.walk(path, visitForPrepareRemoveDir, None)
+
+
+def OnRemoveDirError(func, path, excinfo):
+ logError("Fail to remove %s" % path)
+
+
+def removeDir(cookie, target):
+ # Read only directory could not be deleted.
+ log(cookie, "remove dir: %s" % target)
+ if not cookie.dry_run:
+ prepareRemoveDir(target)
+ try:
+ shutil.rmtree(target, False, OnRemoveDirError)
+ except:
+ logError("Fail to remove dir %s" % target)
+
+
+def convertPath(path):
+ # Convert windows, mac path to unix version.
+ separator = os.path.normpath("/")
+ if separator != "/":
+ path = re.sub(re.escape(separator), "/", path)
+
+ # Help file, folder pattern to express that it should match the all file or folder name.
+ path = "/" + path
+ return path
+
+
+def convertPattern(pattern, sign):
+ """Convert a rsync pattern that match against a path to a filter that match against a converted path."""
+
+ # Check for include vs exclude patterns.
+ if pattern[:2] == "+ ":
+ pattern = pattern[2:]
+ sign = "+"
+ elif pattern[:2] == "- ":
+ pattern = pattern[2:]
+ sign = "-"
+
+ # Express windows, mac patterns in unix patterns (rsync.py extension).
+ separator = os.path.normpath("/")
+ if separator != "/":
+ pattern = re.sub(re.escape(separator), "/", pattern)
+
+ # If pattern contains '/' it should match from the start.
+ temp = pattern
+ if pattern[0] == "/":
+ pattern = pattern[1:]
+ if temp[-1] == "/":
+ temp = temp[:-1]
+
+ # Convert pattern rules: ** * ? to regexp rules.
+ pattern = re.escape(pattern)
+ pattern = string.replace(pattern, "\\?", ".")
+ pattern = string.replace(pattern, "\\*\\*", ".*")
+ pattern = string.replace(pattern, "\\*", "[^/]*")
+ pattern = string.replace(pattern, "\\*", ".*")
+
+ if "/" in temp:
+ # If pattern contains '/' it should match from the start.
+ pattern = "^\\/" + pattern
+ else:
+ # Else the pattern should match the all file or folder name.
+ pattern = "\\/" + pattern
+
+ if pattern[-2:] != "\\/" and pattern[-2:] != ".*":
+ # File patterns should match also folders.
+ pattern = pattern + "\\/?"
+
+ # Pattern should match till the end.
+ pattern = pattern + "$"
+ return (sign, pattern)
+
+
+def convertPatterns(path, sign):
+ """Read the files for pattern and return a vector of filters"""
+ filters = []
+ f = open(path, "r")
+ while 1:
+ pattern = f.readline()
+ if not pattern:
+ break
+ if pattern[-1] == "\n":
+ pattern = pattern[:-1]
+
+ if re.match("[\t ]*$", pattern):
+ continue
+ if pattern[0] == "#":
+ continue
+ filters = filters + [convertPattern(pattern, sign)]
+ f.close()
+ return filters
+
+
+def printUsage():
+ """Print the help string that should printed by rsync.py -h"""
+ print "usage: rsync.py [options] source target"
+ print """
+ -q, --quiet decrease verbosity
+ -r, --recursive recurse into directories
+ -R, --relative use relative path names
+ -u, --update update only (don't overwrite newer files)
+ -t, --times preserve times
+ -n, --dry-run show what would have been transferred
+ --existing only update files that already exist
+ --delete delete files that don't exist on the sending side
+ --delete-excluded also delete excluded files on the receiving side
+ --delete-from-source delete excluded files on the receiving side
+ -I, --ignore-times don't exclude files that match length and time
+ --size-only only use file size when determining if a file should
+ be transferred
+ --modify-window=NUM timestamp window (seconds) for file match (default=2)
+ --existing only update existing target files or folders
+ -C, --cvs-exclude auto ignore files in the same way CVS does
+ --exclude=PATTERN exclude files matching PATTERN
+ --exclude-from=FILE exclude patterns listed in FILE
+ --include=PATTERN don't exclude files matching PATTERN
+ --include-from=FILE don't exclude patterns listed in FILE
+ --version print version number
+ -h, --help show this help screen
+
+See http://www.vdesmedt.com/~vds2212/rsync.html for informations and updates.
+Send an email to vivian@vdesmedt.com for comments and bug reports."""
+
+
+def printVersion():
+ print "rsync.py version 2.0.1"
+
+
+def main(args):
+ cookie = Cookie()
+
+ opts, args = getopt.getopt(args, "qrRntuCIh", ["quiet", "recursive", "relative", "dry-run", "time", "update", "cvs-ignore", "ignore-times", "help", "delete", "delete-excluded", "delete-from-source", "existing", "size-only", "modify-window=", "exclude=", "exclude-from=", "include=", "include-from=", "version"])
+ for o, v in opts:
+ if o in ["-q", "--quiet"]:
+ cookie.quiet = 1
+ if o in ["-r", "--recursive"]:
+ cookie.recursive = 1
+ if o in ["-R", "--relative"]:
+ cookie.relative = 1
+ elif o in ["-n", "--dry-run"]:
+ cookie.dry_run = 1
+ elif o in ["-t", "--times", "--time"]: # --time is there to guaranty backward compatibility with previous buggy version.
+ cookie.time = 1
+ elif o in ["-u", "--update"]:
+ cookie.update = 1
+ elif o in ["-C", "--cvs-ignore"]:
+ cookie.cvs_ignore = 1
+ elif o in ["-I", "--ignore-time"]:
+ cookie.ignore_time = 1
+ elif o == "--delete":
+ cookie.delete = 1
+ elif o == "--delete-excluded":
+ cookie.delete = 1
+ cookie.delete_excluded = 1
+ elif o == "--delete-from-source":
+ cookie.delete_from_source = 1
+ elif o == "--size-only":
+ cookie.size_only = 1
+ elif o == "--modify-window":
+ cookie.modify_window = int(v)
+ elif o == "--existing":
+ cookie.existing = 1
+ elif o == "--exclude":
+ cookie.filters = cookie.filters + [convertPattern(v, "-")]
+ elif o == "--exclude-from":
+ cookie.filters = cookie.filters + convertPatterns(v, "-")
+ elif o == "--include":
+ cookie.filters = cookie.filters + [convertPattern(v, "+")]
+ elif o == "--include-from":
+ cookie.filters = cookie.filters + convertPatterns(v, "+")
+ elif o == "--version":
+ printVersion()
+ return 0
+ elif o in ["-h", "--help"]:
+ printUsage()
+ return 0
+
+ if len(args) <= 1:
+ printUsage()
+ return 1
+
+ #print cookie.filters
+
+ target_root = args[1]
+ try: # In order to allow compatibility below 2.3.
+ pass
+ if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
+ target_root = unicode(target_root, sys.getfilesystemencoding())
+ finally:
+ cookie.target_root = target_root
+
+ sinks = glob.glob(args[0])
+ if not sinks:
+ return 0
+
+ sink_families = {}
+ for sink in sinks:
+ try: # In order to allow compatibility below 2.3.
+ if os.path.__dict__.has_key("supports_unicode_filenames") and os.path.supports_unicode_filenames:
+ sink = unicode(sink, sys.getfilesystemencoding())
+ except:
+ pass
+ sink_name = ""
+ sink_root = sink
+ sink_drive, sink_root = os.path.splitdrive(sink)
+ while not sink_name:
+ if sink_root == os.path.sep:
+ sink_name = "."
+ break
+ sink_root, sink_name = os.path.split(sink_root)
+ sink_root = sink_drive + sink_root
+ if not sink_families.has_key(sink_root):
+ sink_families[sink_root] = []
+ sink_families[sink_root] = sink_families[sink_root] + [sink_name]
+
+ for sink_root in sink_families.keys():
+ if cookie.relative:
+ cookie.sink_root = ""
+ else:
+ cookie.sink_root = sink_root
+
+ global y # In order to allow compatibility below 2.1 (nested scope where used before).
+ y = sink_root
+ files = filter(lambda x: os.path.isfile(os.path.join(y, x)), sink_families[sink_root])
+ if files:
+ visit(cookie, sink_root, files)
+
+ #global y # In order to allow compatibility below 2.1 (nested scope where used before).
+ y = sink_root
+ folders = filter(lambda x: os.path.isdir(os.path.join(y, x)), sink_families[sink_root])
+ for folder in folders:
+ folder_path = os.path.join(sink_root, folder)
+ if not cookie.recursive:
+ visit(cookie, folder_path, os.listdir(folder_path))
+ else:
+ os.path.walk(folder_path, visit, cookie)
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/vstf/vstf/common/saltstack.py b/vstf/vstf/common/saltstack.py
new file mode 100755
index 00000000..efc953c4
--- /dev/null
+++ b/vstf/vstf/common/saltstack.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+# coding=utf-8
+import os
+import sys
+import inspect
+import logging
+import salt.client as sclient
+
+from vstf.common import cmds
+
+log = logging.getLogger(__name__)
+
+
+class Mysalt(object):
+ IS_DIR = 1
+ IS_FILE = 2
+ FAILED = -1
+
+ def __init__(self):
+ self.cur_path = os.path.abspath(os.path.dirname(inspect.stack()[1][1]))
+ self.salt_conf = "/etc/salt/master"
+ if not os.path.exists(self.salt_conf):
+            raise Exception("this script must be run on the salt master.")
+ self.pillar_path = str(
+ cmds.execute("grep '^pillar_roots' \
+ /etc/salt/master -A 2 | sed 1,2d | awk '{print $2}'") + '/')
+ if self.pillar_path == "":
+            log.warning("pillar path not found, make sure pillar_roots is configured")
+ else:
+ os.system("mkdir -p " + self.pillar_path)
+
+ self.state_path = str(cmds.execute("grep '^file_roots' \
+ /etc/salt/master -A 2 | sed 1,2d | awk '{print $2}'") + '/')
+ if self.state_path == "":
+            log.warning("state path not found, make sure file_roots is configured")
+ else:
+ os.system("mkdir -p " + self.state_path)
+
+ self.salt = sclient.LocalClient()
+
+ def slave_exists(self, host):
+ pslave = "/etc/salt/pki/master/minions/" + host
+ if os.path.exists(pslave):
+ return True
+ else:
+ return False
+
+ def __is_dir_or_file(self, src):
+ if not os.path.exists(src):
+ return self.FAILED
+ if os.path.isdir(src):
+ return self.IS_DIR
+ elif os.path.isfile(src):
+ return self.IS_FILE
+ else:
+ return self.FAILED
+
+ def __copy_target(self, target, flag=""):
+ if not os.path.exists(target):
+ log.error("target %(d)s not exists.", {'d': target})
+ return False
+
+ if flag == "pillar":
+ dst = self.pillar_path
+ elif flag == "state":
+ dst = self.state_path
+ else:
+            log.error("this file or dir is neither pillar nor state, not supported now.")
+ return False
+
+ if self.IS_FILE == self.__is_dir_or_file(target):
+ os.system('cp ' + target + ' ' + dst)
+ else:
+ os.system("cp -r " + target + ' ' + dst)
+ return True
+
+ def copy(self, host, src, dst):
+ """copy file or dir to slave.
+ :src a file or a dir
+ :dst if src is a file, the dst must be like this /home/xx.py, not /home
+ """
+
+ '''check if the host exists on the master'''
+ if not self.slave_exists(host):
+            log.error("the host %(h)s is not held by master, please check.", {'h': host})
+ return False
+
+ '''copy file to salt's file_roots'''
+ if not self.__copy_target(src, "state"):
+ return False
+
+ if self.IS_DIR == self.__is_dir_or_file(src):
+ dir_name = os.path.basename(src)
+ self.salt.cmd(host, "cp.get_dir", ["salt://" + dir_name, dst])
+ elif self.IS_FILE == self.__is_dir_or_file(src):
+ file_name = os.path.basename(src)
+ print self.salt.cmd(host, "cp.get_file", ["salt://" + file_name, dst])
+ else:
+            log.error("source is neither a file nor a directory")
+ return False
+ return True
+
+ def __luxuriant_line(self, str, color):
+ if "red" == color:
+ return "\033[22;35;40m" + str + "\033[0m"
+ elif "green" == color:
+ return "\033[22;32;40m" + str + "\033[0m"
+ else:
+ return str
+
+ def result_check(self, ret, host):
+ num_s = 0
+ num_f = 0
+ msg = ""
+ try:
+ for key in ret[host].keys():
+ if True == ret[host][key]['result']:
+ num_s += 1
+ else:
+ num_f += 1
+ msg = msg + self.__luxuriant_line("Failed %d:\n" % num_f, "red")
+ msg = msg + "\t" + key + '\n'
+ msg = msg + self.__luxuriant_line("\t%s\n" % ret[host][key]['comment'], "red")
+ if True == ret[host][key]['changes'].has_key('retcode'):
+ msg = msg + "RETCODE: %s\n" % (ret[host][key]['changes']['retcode'])
+ if True == ret[host][key]['changes'].has_key('stderr'):
+ msg = msg + "STDERR: %s\n" % (ret[host][key]['changes']['stderr'])
+ if True == ret[host][key]['changes'].has_key('stdout'):
+ msg = msg + "STDOUT: %s\n" % (ret[host][key]['changes']['stdout'])
+ msg = msg + self.__luxuriant_line("total success: %d\n" % num_s, "green")
+ msg = msg + self.__luxuriant_line("failed: %d\n" % num_f, "red")
+ except Exception as e:
+            log.error("sorry, checking the result raised an error, <%(e)s>.\nret:%(ret)s",
+ {'e': e, 'ret': ret})
+ return -1
+ log.info(':\n' + msg)
+ return num_f
+
+ def run_state(self, host, fstate, ext_pillar={}, care_result=True):
+ try:
+ log.info("salt " + host + " state.sls " +
+ fstate + ' pillar=\'' + str(ext_pillar) + '\'')
+ ret = self.salt.cmd(host, 'state.sls', [fstate, 'pillar=' + str(ext_pillar)], 180, 'list')
+ except Exception as e:
+            log.error("trying to init host %(host)s raised an error: <%(e)s>.",
+ {'host': host, 'e': e})
+ if True == care_result:
+ raise e
+
+ if 0 != self.result_check(ret, host) and care_result:
+ sys.exit(-1)
+ return True
+
+ def salt_cmd(self, host, cmd):
+ # import pdb
+ # pdb.set_trace()
+        logging.info("Begin to run cmd '%s' on %s" % (cmd, host))
+
+ try:
+ ret = self.salt.cmd(host, 'cmd.run', [cmd])
+        except Exception:
+            log.error("Remote salt execute failed.")
+            ret = None
+        return ret
+
+ def copy_by_state(self, host, src, state_cmd, **kwargs):
+ '''the src must be a dir, and the state.sls
+ must be the name of the dir name'''
+
+ if not self.slave_exists(host):
+            log.error("the host %(h)s is not held by master, please check.", {'h': host})
+ return False
+
+ if not self.__copy_target(src, "state"):
+ return False
+
+ return self.run_state(host, state_cmd, kwargs, care_result=True)
+
+ def get_master_ip(self, host=None):
+ if not host:
+ ret = cmds.execute("grep '^interface:' /etc/salt/master | awk '{print $2}'").strip()
+ return ret
+ try:
+ ret = self.salt.cmd(host, "grains.item", ["master"])[host]['master']
+ except Exception:
+ log.error("salt happened error when get master ip")
+ return ""
+ return ret
+
+
+mysalt = Mysalt()
diff --git a/vstf/vstf/common/ssh.py b/vstf/vstf/common/ssh.py
new file mode 100755
index 00000000..1f7eddc3
--- /dev/null
+++ b/vstf/vstf/common/ssh.py
@@ -0,0 +1,230 @@
+'''
+Created on 2015-7-23
+
+@author: y00228926
+'''
+import os
+import logging
+from stat import S_ISDIR
+import Queue
+import shutil
+import paramiko
+from paramiko.ssh_exception import AuthenticationException
+
+LOG = logging.getLogger(__name__)
+
+
+class SSHClientContext(paramiko.SSHClient):
+ def __init__(self, ip, user, passwd, port=22):
+ self.host = ip
+ self.user = user
+ self.passwd = passwd
+ self.port = port
+ super(SSHClientContext, self).__init__()
+
+ def sync_exec_command(self, cmd):
+ _, stdout, stderr = self.exec_command(cmd)
+ ret = stdout.channel.recv_exit_status()
+ out = stdout.read().strip()
+ err = stderr.read().strip()
+ LOG.info("in %s,%s,return:%s,output:%s:error:%s" % (self.host, cmd, ret, out, err))
+ return ret, out, err
+
+ def connect(self):
+ super(SSHClientContext, self).connect(self.host, self.port, self.user, self.passwd, timeout=10)
+
+ def __enter__(self):
+ self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ if exc_type == AuthenticationException:
+ return False
+
+
+class SFTPClientContext(object):
+ def __init__(self, ip, user, passwd, port=22):
+ self.host = ip
+ self.passwd = passwd
+ self.user = user
+ self.port = port
+
+ def connect(self):
+ self.t = paramiko.Transport((self.host, self.port))
+ self.t.connect(username=self.user, password=self.passwd)
+ self.sftp = paramiko.SFTPClient.from_transport(self.t)
+
+ def get(self, remote, local):
+ self.sftp.get(remote, local)
+
+ def put(self, local, remote):
+ self.sftp.put(local, remote)
+
+ def mkdir(self, path):
+ self.sftp.mkdir(path)
+
+ def rmdir(self, path):
+ self.sftp.rmdir(path)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type == TypeError:
+ return False
+ return False
+
+
+def upload_conf_file(host, user, passwd, src, dst):
+ with SFTPClientContext(host, user, passwd) as ftp:
+ ftp.connect()
+ LOG.info('putting file:%s to %s:%s' % (src, host, dst))
+ ftp.put(src, dst)
+
+
+def upload_dir(host, user, passwd, local_dir, remote_dir):
+ assert remote_dir.startswith('/')
+ assert local_dir != '/'
+ while local_dir.endswith('/'):
+ local_dir = local_dir[:-1]
+ while remote_dir.endswith('/'):
+ remote_dir = remote_dir[:-1]
+ remote_dir = os.path.join(remote_dir, os.path.basename(local_dir))
+ ret, _, _ = run_cmd(host, user, passwd, "sudo rm -rf %s" % remote_dir)
+ if ret != 0 and ret != 1:
+ LOG.error("somehow failed in rm -rf %s on host:%s,return:%s" % (remote_dir, host, ret))
+ exit(1)
+ with SFTPClientContext(host, user, passwd) as sftp:
+ sftp.connect()
+ for root, dirs, files in os.walk(local_dir):
+ for filename in files:
+ local_file = os.path.join(root, filename)
+ remote_file = local_file.replace(local_dir, remote_dir)
+ try:
+ sftp.put(local_file, remote_file)
+ except IOError:
+ sftp.mkdir(os.path.split(remote_file)[0])
+ sftp.put(local_file, remote_file)
+ LOG.info("upload %s to remote %s" % (local_file, remote_file))
+ for name in dirs:
+ local_path = os.path.join(root, name)
+ remote_path = local_path.replace(local_dir, remote_dir)
+ try:
+ sftp.mkdir(remote_path)
+ LOG.info("mkdir path %s" % remote_path)
+ except Exception, e:
+ raise
+ return remote_dir
+
+
+def isdir(path, sftp):
+ exists = True
+ is_dir = False
+ file_stat = None
+ try:
+ file_stat = sftp.stat(path).st_mode
+ is_dir = S_ISDIR(file_stat)
+ except IOError:
+ exists = False
+ return exists, is_dir, file_stat
+
+
+def download_file(host, user, passwd, remote_path, local_path):
+ assert not remote_path.endswith('/')
+ remote_file_name = os.path.basename(remote_path)
+ if local_path.endswith('/'):
+ if not os.path.exists(local_path):
+ raise Exception('path:%s not exist.' % local_path)
+ dest = os.path.join(local_path, remote_file_name)
+ else:
+ if os.path.isdir(local_path):
+ dest = os.path.join(local_path, remote_file_name)
+ else:
+ dir_path = os.path.dirname(local_path)
+ if not os.path.exists(dir_path):
+ raise Exception('path:%s not exist' % dir_path)
+ dest = local_path
+ transport = paramiko.Transport((host, 22))
+ transport.connect(username=user, password=passwd)
+ sftp = paramiko.SFTPClient.from_transport(transport)
+ exists, is_dir, st = isdir(remote_path, sftp)
+ if exists and not is_dir:
+ sftp.get(remote_path, dest)
+ os.chmod(dest, st)
+ else:
+ raise Exception('error:cannot find the file or file is dir')
+ return True
+
+
+def download_dir(host, user, passwd, remote_path, local_path):
+ while remote_path.endswith('/'):
+ remote_path = remote_path[:-1]
+ if local_path.endswith('/'):
+ if not os.path.exists(local_path):
+ raise Exception('path:%s not exist.' % local_path)
+ dest_path = os.path.join(local_path, os.path.basename(remote_path))
+ else:
+ if os.path.isdir(local_path):
+ dest_path = os.path.join(local_path, os.path.basename(remote_path))
+ else:
+ dir_name = os.path.dirname(local_path)
+ if os.path.exists(dir_name):
+ dest_path = local_path
+ else:
+                raise Exception('path:%s does not exist' % dir_name)
+ LOG.info("download_dir from host:%s:%s to dest:%s" % (host, remote_path, dest_path))
+ transport = paramiko.Transport((host, 22))
+ transport.connect(username=user, password=passwd)
+ sftp = paramiko.SFTPClient.from_transport(transport)
+ exists, is_dir, _ = isdir(remote_path, sftp)
+ if exists and is_dir:
+ q = Queue.Queue(0)
+ q.put(remote_path)
+ while not q.empty():
+ path = q.get()
+ st = sftp.lstat(path).st_mode
+ relative_path = path[len(remote_path):]
+ if relative_path.startswith('/'): relative_path = relative_path[1:]
+ local = os.path.join(dest_path, relative_path)
+ if os.path.exists(local):
+ shutil.rmtree(local)
+ os.mkdir(local)
+ os.chmod(local, st)
+ file_list = sftp.listdir(path)
+ for item in file_list:
+ fullpath = os.path.join(path, item)
+ _, is_dir, st = isdir(fullpath, sftp)
+ if is_dir:
+ q.put(fullpath)
+ else:
+ dest = os.path.join(local, item)
+ sftp.get(fullpath, dest)
+ os.chmod(dest, st)
+ else:
+        raise Exception('path:%s:%s does not exist or is not a dir' % (host, remote_path))
+ return dest_path
+
+
+def run_cmd(host, user, passwd, cmd):
+ with SSHClientContext(host, user, passwd) as ssh:
+ ssh.connect()
+ ret, stdout, stderr = ssh.sync_exec_command(cmd)
+ return ret, stdout, stderr
+
+
+class SshFileTransfer(object):
+ def __init__(self, ip, user, passwd):
+ self.ip, self.user, self.passwd = ip, user, passwd
+
+ def upload_dir(self, src, dst):
+ return upload_dir(self.ip, self.user, self.passwd, src, dst)
+
+ def download_dir(self, src, dst):
+ download_dir(self.ip, self.user, self.passwd, src, dst)
+
+ def upload_file(self, src, dst):
+ upload_conf_file(self.ip, self.user, self.passwd, src, dst)
+
+ def download_file(self, src, dst):
+ download_file(self.ip, self.user, self.passwd, src, dst)
diff --git a/vstf/vstf/common/test_func.py b/vstf/vstf/common/test_func.py
new file mode 100755
index 00000000..9b1d24f9
--- /dev/null
+++ b/vstf/vstf/common/test_func.py
@@ -0,0 +1,14 @@
+from vstf.common import cliutil as util
+
+
+@util.arg("--test",
+ dest="test",
+ default="",
+ help="a params of test-xx")
+@util.arg("--xx",
+ dest="xx",
+ default="",
+ help="a params of test-xx")
+def do_test_xx(args):
+ """this is a help doc"""
+    print "run test01 " + args.test + args.xx
\ No newline at end of file
diff --git a/vstf/vstf/common/unix.py b/vstf/vstf/common/unix.py
new file mode 100755
index 00000000..f74944e2
--- /dev/null
+++ b/vstf/vstf/common/unix.py
@@ -0,0 +1,53 @@
+import os
+import socket
+from vstf.common import constants
+from vstf.common import message
+
+
+class UdpServer(object):
+ def __init__(self):
+ super(UdpServer, self).__init__()
+ try:
+ os.unlink(constants.sockaddr)
+ except OSError:
+ if os.path.exists(constants.sockaddr):
+                raise Exception("failed to remove existing socket file %s" % constants.sockaddr)
+ self.conn=socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+ def listen(self,backlog=5):
+ self.conn.listen(backlog)
+
+ def accept(self):
+ return self.conn.accept()
+
+ def bind(self, addr=constants.sockaddr):
+ return self.conn.bind(addr)
+
+# def send(self, data, addr):
+# return message.sendto(self.conn.sendto, data, addr)
+
+# def recv(self, size=constants.buff_size):
+# return message.recv(self.conn.recvfrom)
+
+ def close(self):
+ self.conn.close()
+
+
+class UdpClient(object):
+ def __init__(self):
+ super(UdpClient, self).__init__()
+ if not os.path.exists(constants.sockaddr):
+ raise Exception("socket not found %s" % constants.sockaddr)
+ self.conn=socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+ def connect(self, addr=constants.sockaddr):
+ return self.conn.connect(addr)
+
+ def send(self, data):
+ message.send(self.conn.send, data)
+
+ def recv(self):
+ return message.recv(self.conn.recv)
+
+ def close(self):
+        self.conn.close()
\ No newline at end of file
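One request/response over the Unix-domain control socket above, combined with the message framing module (not part of the commit). The sketch assumes the /opt/vstf directory from constants.py exists and uses a crude sleep to wait for the server thread to bind.

    import time
    import threading
    from vstf.common import message
    from vstf.common.unix import UdpServer, UdpClient


    def serve_once():
        server = UdpServer()
        server.bind()
        server.listen()
        conn, _ = server.accept()
        req = message.decode(message.recv(conn.recv))
        message.send(conn.send, message.encode({"echo": req["cmd"]}))
        conn.close()
        server.close()


    t = threading.Thread(target=serve_once)
    t.start()
    time.sleep(1)                     # crude wait for the server to bind

    client = UdpClient()
    client.connect()
    client.send(message.encode({"cmd": "ping"}))
    print message.decode(client.recv())
    client.close()
    t.join()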
diff --git a/vstf/vstf/common/utils.py b/vstf/vstf/common/utils.py
new file mode 100755
index 00000000..542eaa88
--- /dev/null
+++ b/vstf/vstf/common/utils.py
@@ -0,0 +1,247 @@
+import re
+import logging
+import subprocess
+import random
+import os
+import signal
+import time
+from StringIO import StringIO
+
+LOG = logging.getLogger(__name__)
+
+
+def info():
+ def _deco(func):
+ def __deco(*args, **kwargs):
+ if "shell" in kwargs and not kwargs["shell"]:
+ LOG.info(' '.join(args[0]))
+ else:
+ LOG.info(args[0])
+ return func(*args, **kwargs)
+ return __deco
+ return _deco
+
+
+@info()
+def call(cmd, shell=False):
+ ret = subprocess.call(cmd, shell=shell)
+ if ret != 0:
+        LOG.info("warning: command '%s' did not succeed.", cmd)
+
+
+@info()
+def check_call(cmd, shell=False):
+ subprocess.check_call(cmd, shell=shell)
+
+
+@info()
+def check_output(cmd, shell=False):
+ return subprocess.check_output(cmd, shell=shell)
+
+
+@info()
+def my_popen(cmd, shell=False, stdout=None, stderr=None):
+ return subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)
+
+
+def ping(ip):
+ cmd = "ping -w2 -c1 %s" % ip
+ p = my_popen(cmd, shell=True)
+ return 0 == p.wait()
+
+
+def get_device_name(bdf):
+ path = '/sys/bus/pci/devices/0000:%s/net/' % bdf
+ path1 = '/sys/bus/pci/devices/0000:%s/virtio*/net/' % bdf
+ if os.path.exists(path):
+ device = check_output("ls " + path, shell=True).strip()
+ return device
+ else: # virtio driver
+ try:
+ device = check_output("ls " + path1, shell=True).strip()
+ return device
+ except Exception:
+ return None
+
+
+def my_sleep(delay):
+ LOG.info('sleep %s' % delay)
+ time.sleep(delay)
+
+
+def my_mkdir(filepath):
+ try:
+ LOG.info("mkdir -p %s" % filepath)
+ os.makedirs(filepath)
+ except OSError, e:
+ if e.errno == 17:
+ LOG.info("! %s already exists" % filepath)
+ else:
+ raise
+
+
+def get_eth_by_bdf(bdf):
+ bdf = bdf.replace(' ', '')
+ path = '/sys/bus/pci/devices/0000:%s/net/' % bdf
+ if os.path.exists(path):
+ device = check_output("ls " + path, shell=True).strip()
+ else:
+        raise Exception("can't get device name of bdf:%s" % bdf)
+ return device
+
+
+def check_and_kill(process):
+ cmd = "ps -ef | grep -v grep | awk '{print $8}' | grep -w %s | wc -l" % process
+ out = check_output(cmd, shell=True)
+ if int(out):
+ check_call(['killall', process])
+
+
+def list_mods():
+ return check_output("lsmod | sed 1,1d | awk '{print $1}'", shell=True).split()
+
+
+def check_and_rmmod(mod):
+ if mod in list_mods():
+ check_call(['rmmod', mod])
+
+
+def kill_by_name(process):
+ out = check_output(['ps', '-A'])
+ for line in out.splitlines():
+ values = line.split()
+ pid, name = values[0], values[3]
+ if process == name:
+ pid = int(pid)
+ os.kill(pid, signal.SIGKILL)
+ LOG.info("os.kill(%s)" % pid)
+
+
+def ns_cmd(ns, cmd):
+ netns_exec_str = "ip netns exec %s "
+ if ns in (None, 'null', 'None', 'none'):
+ pass
+ else:
+ cmd = (netns_exec_str % ns) + cmd
+ return cmd
+
+
+def randomMAC():
+ mac = [0x00, 0x16, 0x3e,
+ random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ return ':'.join(map(lambda x: "%02x" % x, mac))
+
+
+class IPCommandHelper(object):
+ def __init__(self, ns=None):
+ self.devices = []
+ self.macs = []
+ self.device_mac_map = {}
+ self.mac_device_map = {}
+ self.bdf_device_map = {}
+ self.device_bdf_map = {}
+ self.mac_bdf_map = {}
+ self.bdf_mac_map = {}
+ cmd = "ip link"
+ if ns:
+ cmd = "ip netns exec %s " % ns + cmd
+ buf = check_output(cmd, shell=True)
+ sio = StringIO(buf)
+ for line in sio:
+ m = re.match(r'^\d+:(.*):.*', line)
+ if m and m.group(1).strip() != "lo":
+ device = m.group(1).strip()
+ self.devices.append(device)
+ mac = self._get_mac(ns, device)
+ self.macs.append(mac)
+ for device, mac in zip(self.devices, self.macs):
+ self.device_mac_map[device] = mac
+ self.mac_device_map[mac] = device
+
+ cmd = "ethtool -i %s"
+ if ns:
+ cmd = "ip netns exec %s " % ns + cmd
+ for device in self.devices:
+ buf = check_output(cmd % device, shell=True)
+ bdfs = re.findall(r'^bus-info: \d{4}:(\d{2}:\d{2}\.\d*)$', buf, re.MULTILINE)
+ if bdfs:
+ self.bdf_device_map[bdfs[0]] = device
+ self.device_bdf_map[device] = bdfs[0]
+ mac = self.device_mac_map[device]
+ self.mac_bdf_map[mac] = bdfs[0]
+ self.bdf_mac_map[bdfs[0]] = mac
+
+ @staticmethod
+ def _get_mac(ns, device):
+ cmd = "ip addr show dev %s" % device
+ if ns:
+ cmd = "ip netns exec %s " % ns + cmd
+ buf = check_output(cmd, shell=True)
+ macs = re.compile(r"[A-F0-9]{2}(?::[A-F0-9]{2}){5}", re.IGNORECASE | re.MULTILINE)
+ for mac in macs.findall(buf):
+ if mac.lower() not in ('00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff'):
+ return mac
+ return None
+
+ def get_device_verbose(self, identity):
+ if identity in self.device_mac_map:
+ device = identity
+ elif identity in self.bdf_device_map:
+ device = self.bdf_device_map[identity]
+ elif identity in self.mac_device_map:
+ device = self.mac_device_map[identity]
+ else:
+            raise Exception("can't find the device by identity:%s" % identity)
+ detail = {
+ 'bdf': self.device_bdf_map[device] if device in self.device_bdf_map else None,
+ 'iface': device,
+ 'mac': self.device_mac_map[device] if device in self.device_mac_map else None,
+ }
+ return detail
+
+
+class AttrDict(dict):
+ """A dictionary with attribute-style access. It maps attribute access to
+ the real dictionary. """
+
+ def __init__(self, init={}):
+ dict.__init__(self, init)
+
+ def __getstate__(self):
+ return self.__dict__.items()
+
+ def __setstate__(self, items):
+ for key, val in items:
+ self.__dict__[key] = val
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
+
+ def __setitem__(self, key, value):
+ return super(AttrDict, self).__setitem__(key, value)
+
+ def __getitem__(self, name):
+ return super(AttrDict, self).__getitem__(name)
+
+ def __delitem__(self, name):
+ return super(AttrDict, self).__delitem__(name)
+
+ __getattr__ = __getitem__
+ __setattr__ = __setitem__
+
+ def copy(self):
+ ch = AttrDict(self)
+ return ch
+
+
+if __name__ == "__main__":
+ ipcmd = IPCommandHelper()
+ print ipcmd.device_mac_map
+ print ipcmd.mac_device_map
+ print ipcmd.bdf_device_map
+ print ipcmd.device_bdf_map
+ print ipcmd.mac_bdf_map
+ print ipcmd.bdf_mac_map
+ print ipcmd.get_device_verbose("tap0")
diff --git a/vstf/vstf/common/vstfcli.py b/vstf/vstf/common/vstfcli.py
new file mode 100755
index 00000000..9dc99779
--- /dev/null
+++ b/vstf/vstf/common/vstfcli.py
@@ -0,0 +1,62 @@
+import argparse
+import sys
+
+
+class VstfHelpFormatter(argparse.HelpFormatter):
+ def start_section(self, heading):
+ # Title-case the headings
+ heading = '%s%s' % (heading[0].upper(), heading[1:])
+ super(VstfHelpFormatter, self).start_section(heading)
+
+
+class VstfParser(argparse.ArgumentParser):
+ def __init__(self,
+ prog='vstf',
+ description="",
+ epilog='',
+ add_help=True,
+ formatter_class=VstfHelpFormatter):
+
+ super(VstfParser, self).__init__(
+ prog=prog,
+ description=description,
+ epilog=epilog,
+ add_help=add_help,
+ formatter_class=formatter_class)
+ self.subcommands = {}
+
+ def _find_actions(self, subparsers, actions_module):
+ for attr in (a for a in dir(actions_module) if a.startswith('do_')):
+ command = attr[3:].replace('_', '-')
+ callback = getattr(actions_module, attr)
+ desc = callback.__doc__ or ''
+ action_help = desc.strip()
+ arguments = getattr(callback, 'arguments', [])
+ subparser = subparsers.add_parser(command,
+ help=action_help,
+ description=desc,
+ add_help=False,
+ formatter_class=VstfHelpFormatter)
+ subparser.add_argument('-h', '--help',
+ action='help',
+ help=argparse.SUPPRESS)
+ self.subcommands[command] = subparser
+ for (args, kwargs) in arguments:
+ subparser.add_argument(*args, **kwargs)
+ subparser.set_defaults(func=callback)
+
+ def set_subcommand_parser(self, target, metavar="<subcommand>"):
+ subparsers = self.add_subparsers(metavar=metavar)
+ self._find_actions(subparsers, target)
+ return subparsers
+
+ def set_parser_to_subcommand(self, subparser, target):
+ self._find_actions(subparser, target)
+
+
+if __name__ == "__main__":
+ from vstf.common import test_func
+ parser = VstfParser(prog="vstf", description="test parser")
+ parser.set_subcommand_parser(test_func)
+ args = parser.parse_args(sys.argv[1:])
+ args.func(args)