author     Ulas Kozat <ulas.kozat@huawei.com>    2015-12-28 16:05:13 -0800
committer  Ulas Kozat <ulas.kozat@huawei.com>    2015-12-28 16:05:13 -0800
commit     c772a1dbc7ace58d099570d41a889adf851c8ba8 (patch)
tree       809aefa0dae407a1d9c12989f7e8f60891700d17 /tools
parent     e671a915d887ae8f7751a54bb07ecb7ed8f2f25b (diff)
Added networking-sfc from openstack project with merge date Dec 23 2015 [stable/colorado] [stable/brahmaputra]
Added patch 13 for subject "add missing db migration files"

Change-Id: Id51a160335a14870c1dd816a44baf9b1958b9ac6
Diffstat (limited to 'tools')
-rw-r--r--  tools/check_i18n.py                  153
-rw-r--r--  tools/check_i18n_test_case.txt        67
-rwxr-xr-x  tools/check_unit_test_structure.sh    52
-rwxr-xr-x  tools/clean.sh                         5
-rw-r--r--  tools/i18n_cfg.py                     97
-rw-r--r--  tools/install_venv.py                 72
-rw-r--r--  tools/install_venv_common.py         172
-rwxr-xr-x  tools/pretty_tox.sh                    6
-rwxr-xr-x  tools/subunit-trace.py               307
-rwxr-xr-x  tools/tox_install.sh                  22
-rwxr-xr-x  tools/with_venv.sh                    19
11 files changed, 972 insertions, 0 deletions
diff --git a/tools/check_i18n.py b/tools/check_i18n.py
new file mode 100644
index 0000000..697ad18
--- /dev/null
+++ b/tools/check_i18n.py
@@ -0,0 +1,153 @@
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import print_function
+
+import compiler
+import imp
+import os.path
+import sys
+
+
+def is_localized(node):
+ """Check whether the message is wrapped by _()."""
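+ # The ASTWalker below attaches a ``parent`` attribute to every node, so
+ # a constant counts as localized when its parent is a call to _().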
+ if isinstance(node.parent, compiler.ast.CallFunc):
+ if isinstance(node.parent.node, compiler.ast.Name):
+ if node.parent.node.name == '_':
+ return True
+ return False
+
+
+class ASTWalker(compiler.visitor.ASTVisitor):
+
+ def default(self, node, *args):
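+ # compiler.ast nodes carry no parent pointers, so annotate each child
+ # with one before descending; is_localized() and the predicates rely on it.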
+ for child in node.getChildNodes():
+ child.parent = node
+ compiler.visitor.ASTVisitor.default(self, node, *args)
+
+
+class Visitor(object):
+
+ def __init__(self, filename, i18n_msg_predicates,
+ msg_format_checkers, debug):
+ self.filename = filename
+ self.debug = debug
+ self.error = 0
+ self.i18n_msg_predicates = i18n_msg_predicates
+ self.msg_format_checkers = msg_format_checkers
+ with open(filename) as f:
+ self.lines = f.readlines()
+
+ def visitConst(self, node):
+ if not isinstance(node.value, str):
+ return
+
+ if is_localized(node):
+ for (checker, msg) in self.msg_format_checkers:
+ if checker(node):
+ print('%s:%d %s: %s Error: %s' %
+ (self.filename, node.lineno,
+ self.lines[node.lineno - 1][:-1],
+ checker.__name__, msg),
+ file=sys.stderr)
+ self.error = 1
+ return
+ if self.debug:
+ print('%s:%d %s: %s' %
+ (self.filename, node.lineno,
+ self.lines[node.lineno - 1][:-1],
+ "Pass"))
+ else:
+ for (predicate, action, msg) in self.i18n_msg_predicates:
+ if predicate(node):
+ if action == 'skip':
+ if self.debug:
+ print('%s:%d %s: %s' %
+ (self.filename, node.lineno,
+ self.lines[node.lineno - 1][:-1],
+ "Pass"))
+ return
+ elif action == 'error':
+ print('%s:%d %s: %s Error: %s' %
+ (self.filename, node.lineno,
+ self.lines[node.lineno - 1][:-1],
+ predicate.__name__, msg),
+ file=sys.stderr)
+ self.error = 1
+ return
+ elif action == 'warn':
+ print('%s:%d %s: %s' %
+ (self.filename, node.lineno,
+ self.lines[node.lineno - 1][:-1],
+ "Warn: %s" % msg))
+ return
+ print('Predicate with wrong action!', file=sys.stderr)
+
+
+def is_file_in_black_list(black_list, f):
+ for black_path in black_list:
+ if os.path.abspath(f).startswith(
+ os.path.abspath(black_path)):
+ return True
+ return False
+
+
+def check_i18n(input_file, i18n_msg_predicates, msg_format_checkers, debug):
+ input_mod = compiler.parseFile(input_file)
+ v = compiler.visitor.walk(input_mod,
+ Visitor(input_file,
+ i18n_msg_predicates,
+ msg_format_checkers,
+ debug),
+ ASTWalker())
+ return v.error
+
+
+if __name__ == '__main__':
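+ # Usage: python check_i18n.py <file-or-directory> <cfg module> [-d]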
+ input_path = sys.argv[1]
+ cfg_path = sys.argv[2]
+ try:
+ cfg_mod = imp.load_source('', cfg_path)
+ except Exception:
+ print("Load cfg module failed", file=sys.stderr)
+ sys.exit(1)
+
+ i18n_msg_predicates = cfg_mod.i18n_msg_predicates
+ msg_format_checkers = cfg_mod.msg_format_checkers
+ black_list = cfg_mod.file_black_list
+
+ debug = False
+ if len(sys.argv) > 3:
+ if sys.argv[3] == '-d':
+ debug = True
+
+ if os.path.isfile(input_path):
+ sys.exit(check_i18n(input_path,
+ i18n_msg_predicates,
+ msg_format_checkers,
+ debug))
+
+ error = 0
+ for dirpath, dirs, files in os.walk(input_path):
+ for f in files:
+ if not f.endswith('.py'):
+ continue
+ input_file = os.path.join(dirpath, f)
+ if is_file_in_black_list(black_list, input_file):
+ continue
+ if check_i18n(input_file,
+ i18n_msg_predicates,
+ msg_format_checkers,
+ debug):
+ error = 1
+ sys.exit(error)
diff --git a/tools/check_i18n_test_case.txt b/tools/check_i18n_test_case.txt
new file mode 100644
index 0000000..3d1391d
--- /dev/null
+++ b/tools/check_i18n_test_case.txt
@@ -0,0 +1,67 @@
+# test-case for check_i18n.py
+# python check_i18n.py check_i18n.txt -d
+
+# message format checking
+# capital checking
+msg = _("hello world, error")
+msg = _("hello world_var, error")
+msg = _('file_list xyz, pass')
+msg = _("Hello world, pass")
+
+# format specifier checking
+msg = _("Hello %s world %d, error")
+msg = _("Hello %s world, pass")
+msg = _("Hello %(var1)s world %(var2)s, pass")
+
+# message has been localized
+# is_localized
+msg = _("Hello world, pass")
+msg = _("Hello world, pass") % var
+LOG.debug(_('Hello world, pass'))
+LOG.info(_('Hello world, pass'))
+raise x.y.Exception(_('Hello world, pass'))
+raise Exception(_('Hello world, pass'))
+
+# message needs to be localized
+# is_log_callfunc
+LOG.debug('hello world, error')
+LOG.debug('hello world, error' % xyz)
+sys.append('hello world, warn')
+
+# is_log_i18n_msg_with_mod
+LOG.debug(_('Hello world, error') % xyz)
+
+# default warn
+msg = 'hello world, warn'
+msg = 'hello world, warn' % var
+
+# message needn't be localized
+# skip only one word
+msg = ''
+msg = "hello,pass"
+
+# skip dict
+msg = {'hello world, pass': 1}
+
+# skip list
+msg = ["hello world, pass"]
+
+# skip subscript
+msg['hello world, pass']
+
+# skip xml marker
+msg = "<test><t></t></test>, pass"
+
+# skip sql statement
+msg = "SELECT * FROM xyz WHERE hello=1, pass"
+msg = "select * from xyz, pass"
+
+# skip add statement
+msg = 'hello world' + e + 'world hello, pass'
+
+# skip doc string
+"""
+Hello world, pass
+"""
+class Msg:
+ pass
diff --git a/tools/check_unit_test_structure.sh b/tools/check_unit_test_structure.sh
new file mode 100755
index 0000000..1aa3841
--- /dev/null
+++ b/tools/check_unit_test_structure.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# This script identifies the unit test modules that do not correspond
+# directly with a module in the code tree. See TESTING.rst for the
+# intended structure.
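+#
+# For example, networking_sfc/tests/unit/foo/test_bar.py is expected to
+# correspond to networking_sfc/foo/bar.py (or to the package foo/).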
+
+neutron_path=$(cd "$(dirname "$0")/.." && pwd)
+base_test_path=networking_sfc/tests/unit
+test_path=$neutron_path/$base_test_path
+
+test_files=$(find ${test_path} -iname 'test_*.py')
+
+ignore_regexes=(
+ "^plugins.*$"
+)
+
+error_count=0
+ignore_count=0
+total_count=0
+for test_file in ${test_files[@]}; do
+ relative_path=${test_file#$test_path/}
+ expected_path=$(dirname $neutron_path/networking_sfc/$relative_path)
+ test_filename=$(basename "$test_file")
+ expected_filename=${test_filename#test_}
+ # Module filename (e.g. foo/bar.py -> foo/test_bar.py)
+ filename=$expected_path/$expected_filename
+ # Package dir (e.g. foo/ -> test_foo.py)
+ package_dir=${filename%.py}
+ if [ ! -f "$filename" ] && [ ! -d "$package_dir" ]; then
+ for ignore_regex in ${ignore_regexes[@]}; do
+ if [[ "$relative_path" =~ $ignore_regex ]]; then
+ ((ignore_count++))
+ continue 2
+ fi
+ done
+ echo "Unexpected test file: $base_test_path/$relative_path"
+ ((error_count++))
+ fi
+ ((total_count++))
+done
+
+if [ "$ignore_count" -ne 0 ]; then
+ echo "$ignore_count unmatched test modules were ignored"
+fi
+
+if [ "$error_count" -eq 0 ]; then
+ echo 'Success! All test modules match targets in the code tree.'
+ exit 0
+else
+ echo "Failure! $error_count of $total_count test modules do not match targets in the code tree."
+ exit 1
+fi
diff --git a/tools/clean.sh b/tools/clean.sh
new file mode 100755
index 0000000..b79f035
--- /dev/null
+++ b/tools/clean.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes
+rm -rf */*.deb
+rm -rf ./plugins/**/build/ ./plugins/**/dist
+rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-*
diff --git a/tools/i18n_cfg.py b/tools/i18n_cfg.py
new file mode 100644
index 0000000..5ad1a51
--- /dev/null
+++ b/tools/i18n_cfg.py
@@ -0,0 +1,97 @@
+import compiler
+import re
+
+
+def is_log_callfunc(n):
+ """Match LOG.xxx('hello %s' % xyz) and LOG.xxx('hello')."""
+ if isinstance(n.parent, compiler.ast.Mod):
+ n = n.parent
+ if isinstance(n.parent, compiler.ast.CallFunc):
+ if isinstance(n.parent.node, compiler.ast.Getattr):
+ if isinstance(n.parent.node.getChildNodes()[0],
+ compiler.ast.Name):
+ if n.parent.node.getChildNodes()[0].name == 'LOG':
+ return True
+ return False
+
+
+def is_log_i18n_msg_with_mod(n):
+ """LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)"""
+ if not isinstance(n.parent.parent, compiler.ast.Mod):
+ return False
+ n = n.parent.parent
+ if isinstance(n.parent, compiler.ast.CallFunc):
+ if isinstance(n.parent.node, compiler.ast.Getattr):
+ if isinstance(n.parent.node.getChildNodes()[0],
+ compiler.ast.Name):
+ if n.parent.node.getChildNodes()[0].name == 'LOG':
+ return True
+ return False
+
+
+def is_wrong_i18n_format(n):
+ """Check _('hello %s' % xyz)"""
+ if isinstance(n.parent, compiler.ast.Mod):
+ n = n.parent
+ if isinstance(n.parent, compiler.ast.CallFunc):
+ if isinstance(n.parent.node, compiler.ast.Name):
+ if n.parent.node.name == '_':
+ return True
+ return False
+
+
+"""
+Predicates used to decide whether a message needs to be localized.
+Each entry is a tuple of (predicate_func, action, message).
+"""
+i18n_msg_predicates = [
+ # Skip ['hello world', 1]
+ (lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''),
+ # Skip {'hello world': 1}
+ (lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''),
+ # Skip msg['hello world']
+ (lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''),
+ # Skip doc string
+ (lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''),
+ # Skip msg = "hello"; normally a message should contain more than one word
+ (lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''),
+ # Skip msg = 'hello world' + vars + 'world hello'
+ (lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''),
+ # Skip xml markers msg = "<test></test>"
+ (lambda n: len(re.compile("</.*>").findall(n.value)) > 0, 'skip', ''),
+ # Skip sql statement
+ (lambda n: len(
+ re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0,
+ 'skip', ''),
+ # LOG.xxx()
+ (is_log_callfunc, 'error', 'Message must be localized'),
+ # _('hello %s' % xyz) should be _('hello %s') % xyz
+ (is_wrong_i18n_format, 'error',
+ ("Message format was wrong, _('hello %s' % xyz) "
+ "should be _('hello %s') % xyz")),
+ # default
+ (lambda n: True, 'warn', 'Message might need to be localized')
+]
+
+
+"""
+Used for checking message format. Each entry is a tuple of (checker_func, message).
+"""
+msg_format_checkers = [
+ # If a message contains more than one format specifier, it should use
+ # a mapping key
+ (lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1,
+ "The message shouldn't contain more than one format specifier"),
+ # Check capital
+ (lambda n: n.value.split(' ')[0].count('_') == 0 and
+ n.value[0].isalpha() and
+ n.value[0].islower(),
+ "First letter must be capital"),
+ (is_log_i18n_msg_with_mod,
+ 'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)')
+]
+
+
+file_black_list = ["./neutron/tests/unit",
+ "./neutron/openstack",
+ "./neutron/plugins/bigswitch/tests"]
diff --git a/tools/install_venv.py b/tools/install_venv.py
new file mode 100644
index 0000000..f8fb8fa
--- /dev/null
+++ b/tools/install_venv.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Copyright 2010 OpenStack Foundation.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Installation script for Neutron's development virtualenv
+"""
+from __future__ import print_function
+
+import os
+import sys
+
+import install_venv_common as install_venv
+
+
+def print_help():
+ help = """
+ Neutron development environment setup is complete.
+
+ Neutron development uses virtualenv to track and manage Python dependencies
+ while in development and testing.
+
+ To activate the Neutron virtualenv for the extent of your current shell
+ session you can run:
+
+ $ source .venv/bin/activate
+
+ Or, if you prefer, you can run commands in the virtualenv on a case-by-case
+ basis by running:
+
+ $ tools/with_venv.sh <your command>
+
+ Also, 'make test' will automatically use the virtualenv.
+ """
+ print(help)
+
+
+def main(argv):
+ root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ venv = os.path.join(root, '.venv')
+ pip_requires = os.path.join(root, 'requirements.txt')
+ test_requires = os.path.join(root, 'test-requirements.txt')
+ py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
+ project = 'Neutron'
+ install = install_venv.InstallVenv(root, venv, pip_requires, test_requires,
+ py_version, project)
+ options = install.parse_args(argv)
+ install.check_python_version()
+ install.check_dependencies()
+ install.create_virtualenv(no_site_packages=options.no_site_packages)
+ install.install_dependencies()
+ print_help()
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
new file mode 100644
index 0000000..e279159
--- /dev/null
+++ b/tools/install_venv_common.py
@@ -0,0 +1,172 @@
+# Copyright 2013 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Provides methods needed by installation script for OpenStack development
+virtual environments.
+
+Since this script is used to bootstrap a virtualenv from the system's Python
+environment, it should be kept strictly compatible with Python 2.6.
+
+Synced in from openstack-common
+"""
+
+from __future__ import print_function
+
+import optparse
+import os
+import subprocess
+import sys
+
+
+class InstallVenv(object):
+
+ def __init__(self, root, venv, requirements,
+ test_requirements, py_version,
+ project):
+ self.root = root
+ self.venv = venv
+ self.requirements = requirements
+ self.test_requirements = test_requirements
+ self.py_version = py_version
+ self.project = project
+
+ def die(self, message, *args):
+ print(message % args, file=sys.stderr)
+ sys.exit(1)
+
+ def check_python_version(self):
+ if sys.version_info < (2, 6):
+ self.die("Need Python Version >= 2.6")
+
+ def run_command_with_code(self, cmd, redirect_output=True,
+ check_exit_code=True):
+ """Runs a command in an out-of-process shell.
+
+ Returns the output of that command. Working directory is self.root.
+ """
+ if redirect_output:
+ stdout = subprocess.PIPE
+ else:
+ stdout = None
+
+ proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
+ output = proc.communicate()[0]
+ if check_exit_code and proc.returncode != 0:
+ self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
+ return (output, proc.returncode)
+
+ def run_command(self, cmd, redirect_output=True, check_exit_code=True):
+ return self.run_command_with_code(cmd, redirect_output,
+ check_exit_code)[0]
+
+ def get_distro(self):
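+ # Fedora-family hosts (detected via their release files) must provide
+ # virtualenv through rpm; other distros fall back to easy_install.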
+ if (os.path.exists('/etc/fedora-release') or
+ os.path.exists('/etc/redhat-release')):
+ return Fedora(
+ self.root, self.venv, self.requirements,
+ self.test_requirements, self.py_version, self.project)
+ else:
+ return Distro(
+ self.root, self.venv, self.requirements,
+ self.test_requirements, self.py_version, self.project)
+
+ def check_dependencies(self):
+ self.get_distro().install_virtualenv()
+
+ def create_virtualenv(self, no_site_packages=True):
+ """Creates the virtual environment and installs pip.
+
+ pip is installed only into the virtual environment, leaving the
+ system Python untouched.
+ """
+ if not os.path.isdir(self.venv):
+ print('Creating venv...', end=' ')
+ if no_site_packages:
+ self.run_command(['virtualenv', '-q', '--no-site-packages',
+ self.venv])
+ else:
+ self.run_command(['virtualenv', '-q', self.venv])
+ print('done.')
+ else:
+ print("venv already exists...")
+
+ def pip_install(self, *args):
+ self.run_command(['tools/with_venv.sh',
+ 'pip', 'install', '--upgrade'] + list(args),
+ redirect_output=False)
+
+ def install_dependencies(self):
+ print('Installing dependencies with pip (this can take a while)...')
+
+ # First things first: make sure our venv has the latest pip,
+ # setuptools and pbr
+ self.pip_install('pip>=1.4')
+ self.pip_install('setuptools')
+ self.pip_install('pbr')
+
+ self.pip_install('-r', self.requirements, '-r', self.test_requirements)
+
+ def parse_args(self, argv):
+ """Parses command-line arguments."""
+ parser = optparse.OptionParser()
+ parser.add_option('-n', '--no-site-packages',
+ action='store_true',
+ help="Do not inherit packages from global Python "
+ "install.")
+ return parser.parse_args(argv[1:])[0]
+
+
+class Distro(InstallVenv):
+
+ def check_cmd(self, cmd):
+ return bool(self.run_command(['which', cmd],
+ check_exit_code=False).strip())
+
+ def install_virtualenv(self):
+ if self.check_cmd('virtualenv'):
+ return
+
+ if self.check_cmd('easy_install'):
+ print('Installing virtualenv via easy_install...', end=' ')
+ if self.run_command(['easy_install', 'virtualenv']):
+ print('Succeeded')
+ return
+ else:
+ print('Failed')
+
+ self.die('ERROR: virtualenv not found.\n\n%s development'
+ ' requires virtualenv, please install it using your'
+ ' favorite package management tool' % self.project)
+
+
+class Fedora(Distro):
+ """This covers all Fedora-based distributions.
+
+ Includes: Fedora, RHEL, CentOS, Scientific Linux
+ """
+
+ def check_pkg(self, pkg):
+ return self.run_command_with_code(['rpm', '-q', pkg],
+ check_exit_code=False)[1] == 0
+
+ def install_virtualenv(self):
+ if self.check_cmd('virtualenv'):
+ return
+
+ if not self.check_pkg('python-virtualenv'):
+ self.die("Please install 'python-virtualenv'.")
+
+ super(Fedora, self).install_virtualenv()
diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh
new file mode 100755
index 0000000..3ed73c1
--- /dev/null
+++ b/tools/pretty_tox.sh
@@ -0,0 +1,6 @@
+#! /bin/sh
+
+TESTRARGS=$1
+
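+# Duplicate stdout on fd 3 so testr's subunit stream can be piped through
+# subunit-trace.py while its exit status is smuggled out via fd 4 and
+# propagated as this script's exit code.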
+exec 3>&1
+status=$(exec 4>&1 >&3; (python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | $(dirname $0)/subunit-trace.py -f) && exit $status
diff --git a/tools/subunit-trace.py b/tools/subunit-trace.py
new file mode 100755
index 0000000..73f2f10
--- /dev/null
+++ b/tools/subunit-trace.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Hewlett-Packard Development Company, L.P.
+# Copyright 2014 Samsung Electronics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Trace a subunit stream in reasonable detail and high accuracy."""
+
+import argparse
+import functools
+import os
+import re
+import sys
+
+import mimeparse
+import subunit
+import testtools
+
+DAY_SECONDS = 60 * 60 * 24
+FAILS = []
+RESULTS = {}
+
+
+class Starts(testtools.StreamResult):
+
+ def __init__(self, output):
+ super(Starts, self).__init__()
+ self._output = output
+
+ def startTestRun(self):
+ self._neednewline = False
+ self._emitted = set()
+
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ super(Starts, self).status(
+ test_id, test_status,
+ test_tags=test_tags, runnable=runnable, file_name=file_name,
+ file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+ route_code=route_code, timestamp=timestamp)
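+ # Output without a test_id is raw non-test stream content (for example
+ # captured stdout); normalise its MIME type and echo it through.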
+ if not test_id:
+ if not file_bytes:
+ return
+ if not mime_type or mime_type == 'test/plain;charset=utf8':
+ mime_type = 'text/plain; charset=utf-8'
+ primary, sub, parameters = mimeparse.parse_mime_type(mime_type)
+ content_type = testtools.content_type.ContentType(
+ primary, sub, parameters)
+ content = testtools.content.Content(
+ content_type, lambda: [file_bytes])
+ text = content.as_text()
+ if text and text[-1] not in '\r\n':
+ self._neednewline = True
+ self._output.write(text)
+ elif test_status == 'inprogress' and test_id not in self._emitted:
+ if self._neednewline:
+ self._neednewline = False
+ self._output.write('\n')
+ worker = ''
+ for tag in test_tags or ():
+ if tag.startswith('worker-'):
+ worker = '(' + tag[7:] + ') '
+ if timestamp:
+ timestr = timestamp.isoformat()
+ else:
+ timestr = ''
+ self._output.write('%s: %s%s [start]\n' %
+ (timestr, worker, test_id))
+ self._emitted.add(test_id)
+
+
+def cleanup_test_name(name, strip_tags=True, strip_scenarios=False):
+ """Clean up the test name for display.
+
+ By default we strip out the tags in the test because they don't help us
+ in matching the test that is run to its result.
+
+ Make it possible to strip out the testscenarios information (not to
+ be confused with tempest scenarios); however, that is often needed to
+ identify generated negative tests.
+ """
+ if strip_tags:
+ tags_start = name.find('[')
+ tags_end = name.find(']')
+ if tags_start > 0 and tags_end > tags_start:
+ newname = name[:tags_start]
+ newname += name[tags_end + 1:]
+ name = newname
+
+ if strip_scenarios:
+ tags_start = name.find('(')
+ tags_end = name.find(')')
+ if tags_start > 0 and tags_end > tags_start:
+ newname = name[:tags_start]
+ newname += name[tags_end + 1:]
+ name = newname
+
+ return name
+
+
+def get_duration(timestamps):
+ start, end = timestamps
+ if not start or not end:
+ duration = ''
+ else:
+ delta = end - start
+ duration = '%d.%06ds' % (
+ delta.days * DAY_SECONDS + delta.seconds, delta.microseconds)
+ return duration
+
+
+def find_worker(test):
+ for tag in test['tags']:
+ if tag.startswith('worker-'):
+ return int(tag[7:])
+ return 'NaN'
+
+
+# Print out stdout/stderr if it exists, always
+def print_attachments(stream, test, all_channels=False):
+ """Print out subunit attachments.
+
+ Print out subunit attachments that contain content. This
+ runs in 2 modes, one for successes where we print out just stdout
+ and stderr, and an override that dumps all the attachments.
+ """
+ channels = ('stdout', 'stderr')
+ for name, detail in test['details'].items():
+ # NOTE(sdague): the subunit names are a little crazy, and actually
+ # are in the form pythonlogging:'' (with the colon and quotes)
+ name = name.split(':')[0]
+ if detail.content_type.type == 'test':
+ detail.content_type.type = 'text'
+ if (all_channels or name in channels) and detail.as_text():
+ title = "Captured %s:" % name
+ stream.write("\n%s\n%s\n" % (title, ('~' * len(title))))
+ # indent attachment lines 4 spaces to make them visually
+ # offset
+ for line in detail.as_text().split('\n'):
+ stream.write(" %s\n" % line)
+
+
+def show_outcome(stream, test, print_failures=False, failonly=False):
+ global RESULTS
+ status = test['status']
+ # TODO(sdague): ask lifeless why on this?
+ if status == 'exists':
+ return
+
+ worker = find_worker(test)
+ name = cleanup_test_name(test['id'])
+ duration = get_duration(test['timestamps'])
+
+ if worker not in RESULTS:
+ RESULTS[worker] = []
+ RESULTS[worker].append(test)
+
+ # don't count the process-returncode pseudo-test as a failure
+ if name == 'process-returncode':
+ return
+
+ if status == 'fail':
+ FAILS.append(test)
+ stream.write('{%s} %s [%s] ... FAILED\n' % (
+ worker, name, duration))
+ if not print_failures:
+ print_attachments(stream, test, all_channels=True)
+ elif not failonly:
+ if status == 'success':
+ stream.write('{%s} %s [%s] ... ok\n' % (
+ worker, name, duration))
+ print_attachments(stream, test)
+ elif status == 'skip':
+ stream.write('{%s} %s ... SKIPPED: %s\n' % (
+ worker, name, test['details']['reason'].as_text()))
+ else:
+ stream.write('{%s} %s [%s] ... %s\n' % (
+ worker, name, duration, test['status']))
+ if not print_failures:
+ print_attachments(stream, test, all_channels=True)
+
+ stream.flush()
+
+
+def print_fails(stream):
+ """Print summary failure report.
+
+ Currently unused; however, there remains debate on inline vs. at-end
+ reporting, so leave the utility function for later use.
+ """
+ if not FAILS:
+ return
+ stream.write("\n==============================\n")
+ stream.write("Failed %s tests - output below:" % len(FAILS))
+ stream.write("\n==============================\n")
+ for f in FAILS:
+ stream.write("\n%s\n" % f['id'])
+ stream.write("%s\n" % ('-' * len(f['id'])))
+ print_attachments(stream, f, all_channels=True)
+ stream.write('\n')
+
+
+def count_tests(key, value):
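+ # Count results across all workers whose ``key`` field matches the
+ # regex ``value``, e.g. count_tests('status', 'fail').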
+ count = 0
+ for k, v in RESULTS.items():
+ for item in v:
+ if key in item:
+ if re.search(value, item[key]):
+ count += 1
+ return count
+
+
+def run_time():
+ runtime = 0.0
+ for k, v in RESULTS.items():
+ for test in v:
+ runtime += float(get_duration(test['timestamps']).strip('s'))
+ return runtime
+
+
+def worker_stats(worker):
+ tests = RESULTS[worker]
+ num_tests = len(tests)
+ delta = tests[-1]['timestamps'][1] - tests[0]['timestamps'][0]
+ return num_tests, delta
+
+
+def print_summary(stream):
+ stream.write("\n======\nTotals\n======\n")
+ stream.write("Run: %s in %s sec.\n" % (count_tests('status', '.*'),
+ run_time()))
+ stream.write(" - Passed: %s\n" % count_tests('status', 'success'))
+ stream.write(" - Skipped: %s\n" % count_tests('status', 'skip'))
+ stream.write(" - Failed: %s\n" % count_tests('status', 'fail'))
+
+ # we could have no results, especially as we filter out the process-codes
+ if RESULTS:
+ stream.write("\n==============\nWorker Balance\n==============\n")
+
+ for w in range(max(RESULTS.keys()) + 1):
+ if w not in RESULTS:
+ stream.write(
+ " - WARNING: missing Worker %s! "
+ "Race in testr accounting.\n" % w)
+ else:
+ num, time = worker_stats(w)
+ stream.write(" - Worker %s (%s tests) => %ss\n" %
+ (w, num, time))
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--no-failure-debug', '-n', action='store_true',
+ dest='print_failures', help='Disable printing failure '
+ 'debug information in realtime')
+ parser.add_argument('--fails', '-f', action='store_true',
+ dest='post_fails', help='Print failure debug '
+ 'information after the stream is processed')
+ parser.add_argument('--failonly', action='store_true',
+ dest='failonly', help="Don't print success items",
+ default=(
+ os.environ.get('TRACE_FAILONLY', False)
+ is not False))
+ return parser.parse_args()
+
+
+def main():
+ args = parse_args()
+ stream = subunit.ByteStreamToStreamResult(
+ sys.stdin, non_subunit_name='stdout')
+ starts = Starts(sys.stdout)
+ outcomes = testtools.StreamToDict(
+ functools.partial(show_outcome, sys.stdout,
+ print_failures=args.print_failures,
+ failonly=args.failonly
+ ))
+ summary = testtools.StreamSummary()
+ result = testtools.CopyStreamResult([starts, outcomes, summary])
+ result.startTestRun()
+ try:
+ stream.run(result)
+ finally:
+ result.stopTestRun()
+ if count_tests('status', '.*') == 0:
+ print("The test run didn't actually run any tests")
+ return 1
+ if args.post_fails:
+ print_fails(sys.stdout)
+ print_summary(sys.stdout)
+ return (0 if summary.wasSuccessful() else 1)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tools/tox_install.sh b/tools/tox_install.sh
new file mode 100755
index 0000000..75b91a3
--- /dev/null
+++ b/tools/tox_install.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Many of neutron's repos suffer from the problem of depending on neutron,
+# which is not published on PyPI.
+
+# This wrapper for tox's package installer will use the existing package
+# if it exists, else use zuul-cloner if that program exists, else grab it
+# from a hard-coded neutron branch URL. That last case should only
+# happen with devs running unit tests locally.
+
+# From the tox.ini config page:
+# install_command=ARGV
+# default:
+# pip install {opts} {packages}
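+#
+# A typical (assumed) hookup in tox.ini would be:
+#   install_command = {toxinidir}/tools/tox_install.sh {opts} {packages}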
+
+set -e
+
+echo "PIP HARDCODE" > /tmp/tox_install.txt
+pip install -U -egit+https://git.openstack.org/openstack/neutron@stable/liberty#egg=neutron
+
+pip install -U $*
+exit $?
diff --git a/tools/with_venv.sh b/tools/with_venv.sh
new file mode 100755
index 0000000..5fb07ea
--- /dev/null
+++ b/tools/with_venv.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+# Copyright 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
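+# Resolve the repository's .venv relative to this script, activate it, and
+# run the given command inside it.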
+TOOLS=`dirname $0`
+VENV=$TOOLS/../.venv
+source $VENV/bin/activate && "$@"