From f036e9898a69f5041f9cde02e3652c29e2de1643 Mon Sep 17 00:00:00 2001
From: Ross Brattain
Date: Mon, 5 Dec 2016 16:11:54 -0500
Subject: Add support for Python 3

Porting to Python 3 using the OpenStack guidelines:
https://wiki.openstack.org/wiki/Python3

This passes unit tests on Python 3.5 and passes the opnfv_smoke suite.

Updates:

  use six for urlparse and urlopen
  fix exception.message attribute removal
  run unittests on python3
  use unittest.mock on python 3
  fix open mock for vsperf
  fix float division by using delta/epsilon comparison
  use unicode in StringIO
  use plugin/sample_config.yaml relative path from test case
  fixed apexlake unittests
  upgraded to mock 2.0.0 to match python3 unittest.mock features
  fixed flake8 issues
  implement safe JSON decode with oslo_serialization.jsonutils.dump_as_bytes()
  implement safe unicode encode/decode with oslo_utils.encodeutils
  heat: convert pub key file from bytes to unicode
    pkg_resources returns raw bytes; on python3 we have to decode this to
    utf-8 unicode so JSON can encode it for the heat template

JIRA: YARDSTICK-452

Change-Id: Ib80dd1d0c0eb0592acd832b82f6a7f8f7c20bfda
Signed-off-by: Ross Brattain
---
 yardstick/benchmark/core/plugin.py   |  7 ++++---
 yardstick/benchmark/core/runner.py   |  8 +++++---
 yardstick/benchmark/core/scenario.py |  4 +++-
 yardstick/benchmark/core/task.py     | 35 +++++++++++++++++++----------------
 yardstick/benchmark/core/testcase.py | 19 +++++++++++--------
 5 files changed, 42 insertions(+), 31 deletions(-)

(limited to 'yardstick/benchmark/core')

diff --git a/yardstick/benchmark/core/plugin.py b/yardstick/benchmark/core/plugin.py
index da12ce438..3080f5dd9 100644
--- a/yardstick/benchmark/core/plugin.py
+++ b/yardstick/benchmark/core/plugin.py
@@ -10,6 +10,7 @@
 """ Handler for yardstick command 'plugin' """
 from __future__ import print_function
+from __future__ import absolute_import
 import os
 import sys
 import yaml
@@ -182,7 +183,7 @@ class PluginParser(object):
         and a deployment instance
         """
-        print ("Parsing plugin config:", self.path)
+        print("Parsing plugin config:", self.path)
         try:
             kw = {}
@@ -191,10 +192,10 @@ class PluginParser(object):
                     input_plugin = f.read()
                     rendered_plugin = TaskTemplate.render(input_plugin, **kw)
             except Exception as e:
-                print(("Failed to render template:\n%(plugin)s\n%(err)s\n")
+                print("Failed to render template:\n%(plugin)s\n%(err)s\n"
                       % {"plugin": input_plugin, "err": e})
                 raise e
-            print(("Input plugin is:\n%s\n") % rendered_plugin)
+            print("Input plugin is:\n%s\n" % rendered_plugin)
             cfg = yaml.load(rendered_plugin)
         except IOError as ioerror:

diff --git a/yardstick/benchmark/core/runner.py b/yardstick/benchmark/core/runner.py
index e8dd21a12..5f8132da8 100644
--- a/yardstick/benchmark/core/runner.py
+++ b/yardstick/benchmark/core/runner.py
@@ -9,6 +9,8 @@
 """ Handler for yardstick command 'runner' """
+from __future__ import absolute_import
+from __future__ import print_function
 from yardstick.benchmark.runners.base import Runner
 from yardstick.benchmark.core import print_hbar
@@ -26,11 +28,11 @@ class Runners(object):
         print("| %-16s | %-60s" % ("Type", "Description"))
         print_hbar(78)
         for rtype in types:
-            print "| %-16s | %-60s" % (rtype.__execution_type__,
-                                       rtype.__doc__.split("\n")[0])
+            print("| %-16s | %-60s" % (rtype.__execution_type__,
+                                       rtype.__doc__.split("\n")[0]))
         print_hbar(78)

     def show(self, args):
         '''Show details of a specific runner type'''
         rtype = Runner.get_cls(args.type[0])
-        print rtype.__doc__
+        print(rtype.__doc__)
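Note on the pattern above: the plugin.py and runner.py hunks apply the two basic Python 3 conversions, print as a function and absolute imports, while the commit message's "use six for urlparse and urlopen" item relies on six.moves for the renamed stdlib modules. A minimal sketch of that combination (the fetch() helper is illustrative, not code from this patch):

    from __future__ import absolute_import
    from __future__ import print_function

    # six.moves resolves renamed stdlib modules on both interpreters:
    # urlparse/urllib2 on Python 2, urllib.parse/urllib.request on Python 3.
    from six.moves.urllib.parse import urlparse
    from six.moves.urllib.request import urlopen


    def fetch(url):
        """Print the URL scheme, then open the URL."""
        print("scheme:", urlparse(url).scheme)
        return urlopen(url)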
diff --git a/yardstick/benchmark/core/scenario.py b/yardstick/benchmark/core/scenario.py
index e228054ee..15335afd4 100644
--- a/yardstick/benchmark/core/scenario.py
+++ b/yardstick/benchmark/core/scenario.py
@@ -9,6 +9,8 @@
 """ Handler for yardstick command 'scenario' """
+from __future__ import absolute_import
+from __future__ import print_function
 from yardstick.benchmark.scenarios.base import Scenario
 from yardstick.benchmark.core import print_hbar
@@ -33,4 +35,4 @@ class Scenarios(object):
     def show(self, args):
         '''Show details of a specific scenario type'''
         stype = Scenario.get_cls(args.type[0])
-        print stype.__doc__
+        print(stype.__doc__)

diff --git a/yardstick/benchmark/core/task.py b/yardstick/benchmark/core/task.py
index 8fb117771..d9a85764a 100644
--- a/yardstick/benchmark/core/task.py
+++ b/yardstick/benchmark/core/task.py
@@ -9,6 +9,8 @@
 """ Handler for yardstick command 'task' """
+from __future__ import absolute_import
+from __future__ import print_function
 import sys
 import os
 import yaml
@@ -18,7 +20,7 @@
 import time
 import logging
 import uuid
 import errno
-from itertools import ifilter
+from six.moves import filter

 from yardstick.benchmark.contexts.base import Context
 from yardstick.benchmark.runners import base as base_runner
@@ -71,7 +73,7 @@ class Task(object):  # pragma: no cover
             one_task_start_time = time.time()
             parser.path = task_files[i]
             scenarios, run_in_parallel, meet_precondition = parser.parse_task(
-                 self.task_id, task_args[i], task_args_fnames[i])
+                self.task_id, task_args[i], task_args_fnames[i])

             if not meet_precondition:
                 LOG.info("meet_precondition is %s, please check envrionment",
@@ -96,7 +98,7 @@ class Task(object):  # pragma: no cover
         LOG.info("total finished in %d secs",
                  total_end_time - total_start_time)
-        print "Done, exiting"
+        print("Done, exiting")

     def _run(self, scenarios, run_in_parallel, output_file):
         '''Deploys context and calls runners'''
@@ -106,7 +108,7 @@ class Task(object):  # pragma: no cover
         background_runners = []

         # Start all background scenarios
-        for scenario in ifilter(_is_background_scenario, scenarios):
+        for scenario in filter(_is_background_scenario, scenarios):
             scenario["runner"] = dict(type="Duration", duration=1000000000)
             runner = run_one_scenario(scenario, output_file)
             background_runners.append(runner)
@@ -121,14 +123,14 @@ class Task(object):  # pragma: no cover
             # Wait for runners to finish
             for runner in runners:
                 runner_join(runner)
-                print "Runner ended, output in", output_file
+                print("Runner ended, output in", output_file)
         else:
             # run serially
             for scenario in scenarios:
                 if not _is_background_scenario(scenario):
                     runner = run_one_scenario(scenario, output_file)
                     runner_join(runner)
-                    print "Runner ended, output in", output_file
+                    print("Runner ended, output in", output_file)

         # Abort background runners
         for runner in background_runners:
@@ -142,7 +144,7 @@ class Task(object):  # pragma: no cover
                 runner_join(runner)
             else:
                 base_runner.Runner.release(runner)
-            print "Background task ended"
+            print("Background task ended")

 # TODO: Move stuff below into TaskCommands class !?
@@ -150,6 +152,7 @@ class Task(object):  # pragma: no cover

 class TaskParser(object):       # pragma: no cover
     '''Parser for task config files in yaml format'''
+
     def __init__(self, path):
         self.path = path
@@ -224,7 +227,7 @@ class TaskParser(object):       # pragma: no cover
     def parse_task(self, task_id, task_args=None, task_args_file=None):
         '''parses the task file and return an context and scenario instances'''
-        print "Parsing task config:", self.path
+        print("Parsing task config:", self.path)
         try:
             kw = {}
@@ -241,10 +244,10 @@ class TaskParser(object):       # pragma: no cover
                     input_task = f.read()
                     rendered_task = TaskTemplate.render(input_task, **kw)
             except Exception as e:
-                print(("Failed to render template:\n%(task)s\n%(err)s\n")
+                print("Failed to render template:\n%(task)s\n%(err)s\n"
                       % {"task": input_task, "err": e})
                 raise e
-            print(("Input task is:\n%s\n") % rendered_task)
+            print("Input task is:\n%s\n" % rendered_task)
             cfg = yaml.load(rendered_task)
         except IOError as ioerror:
@@ -343,7 +346,7 @@ def atexit_handler():
     base_runner.Runner.terminate_all()

     if len(Context.list) > 0:
-        print "Undeploying all contexts"
+        print("Undeploying all contexts")
         for context in Context.list:
             context.undeploy()
@@ -351,7 +354,7 @@ def atexit_handler():
 def is_ip_addr(addr):
     '''check if string addr is an IP address'''
     try:
-        ipaddress.ip_address(unicode(addr))
+        ipaddress.ip_address(addr.encode('utf-8'))
         return True
     except ValueError:
         return False
@@ -434,7 +437,7 @@ def run_one_scenario(scenario_cfg, output_file):
         context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)

     runner = base_runner.Runner.get(runner_cfg)
-    print "Starting runner of type '%s'" % runner_cfg["type"]
+    print("Starting runner of type '%s'" % runner_cfg["type"])

     runner.run(scenario_cfg, context_cfg)

     return runner
@@ -460,7 +463,7 @@ def runner_join(runner):


 def print_invalid_header(source_name, args):
-    print(("Invalid %(source)s passed:\n\n %(args)s\n")
+    print("Invalid %(source)s passed:\n\n %(args)s\n"
           % {"source": source_name, "args": args})


@@ -470,13 +473,13 @@ def parse_task_args(src_name, args):
         kw = {} if kw is None else kw
     except yaml.parser.ParserError as e:
         print_invalid_header(src_name, args)
-        print(("%(source)s has to be YAML. Details:\n\n%(err)s\n")
+        print("%(source)s has to be YAML. Details:\n\n%(err)s\n"
              % {"source": src_name, "err": e})
         raise TypeError()

     if not isinstance(kw, dict):
         print_invalid_header(src_name, args)
-        print(("%(src)s had to be dict, actually %(src_type)s\n")
+        print("%(src)s had to be dict, actually %(src_type)s\n"
              % {"src": src_name, "src_type": type(kw)})
         raise TypeError()
     return kw
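The itertools.ifilter change in task.py above follows the usual six idiom: ifilter no longer exists on Python 3, where the built-in filter is already lazy, so six.moves.filter gives a lazy filter on both interpreters. A self-contained sketch of the idiom (the predicate and sample data below are stand-ins, not taken from task.py):

    from __future__ import print_function

    # itertools.ifilter on Python 2, the lazy builtin filter on Python 3.
    from six.moves import filter


    def _is_background(scenario):
        # Stand-in predicate: treat scenarios flagged in their dict as background.
        return scenario.get("run_in_background", False)


    scenarios = [{"name": "ping"}, {"name": "cpuload", "run_in_background": True}]

    # Iterates lazily and prints only "cpuload" on both Python 2 and Python 3.
    for scenario in filter(_is_background, scenarios):
        print(scenario["name"])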
diff --git a/yardstick/benchmark/core/testcase.py b/yardstick/benchmark/core/testcase.py
index d292ad2d7..74304857f 100644
--- a/yardstick/benchmark/core/testcase.py
+++ b/yardstick/benchmark/core/testcase.py
@@ -8,6 +8,8 @@
 ##############################################################################
 """ Handler for yardstick command 'testcase' """
+from __future__ import absolute_import
+from __future__ import print_function
 import os
 import yaml
 import sys
@@ -22,6 +24,7 @@ class Testcase(object):
     Set of commands to discover and display test cases.
     '''
+
     def __init__(self):
         self.test_case_path = YARDSTICK_ROOT_PATH + 'tests/opnfv/test_cases/'
         self.testcase_list = []
@@ -32,7 +35,7 @@ class Testcase(object):
         try:
             testcase_files = os.listdir(self.test_case_path)
         except Exception as e:
-            print(("Failed to list dir:\n%(path)s\n%(err)s\n")
+            print("Failed to list dir:\n%(path)s\n%(err)s\n"
                   % {"path": self.test_case_path, "err": e})
             raise e
         testcase_files.sort()
@@ -52,11 +55,11 @@ class Testcase(object):
             with open(testcase_path) as f:
                 try:
                     testcase_info = f.read()
-                    print testcase_info
+                    print(testcase_info)

                 except Exception as e:
-                    print(("Failed to load test cases:"
-                           "\n%(testcase_file)s\n%(err)s\n")
+                    print("Failed to load test cases:"
+                          "\n%(testcase_file)s\n%(err)s\n"
                           % {"testcase_file": testcase_path, "err": e})
                     raise e
         except IOError as ioerror:
@@ -70,8 +73,8 @@ class Testcase(object):
             try:
                 testcase_info = f.read()
             except Exception as e:
-                print(("Failed to load test cases:"
-                       "\n%(testcase_file)s\n%(err)s\n")
+                print("Failed to load test cases:"
+                      "\n%(testcase_file)s\n%(err)s\n"
                       % {"testcase_file": testcase_file, "err": e})
                 raise e
             description, installer, deploy_scenarios = \
@@ -107,6 +110,6 @@ class Testcase(object):
         print("| %-21s | %-60s" % ("Testcase Name", "Description"))
         print_hbar(88)
         for testcase_record in testcase_list:
-            print "| %-16s | %-60s" % (testcase_record['Name'],
-                                       testcase_record['Description'])
+            print("| %-16s | %-60s" % (testcase_record['Name'],
+                                       testcase_record['Description']))
         print_hbar(88)
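The heat and JSON items in the commit message (decode pkg_resources bytes to unicode, encode JSON with oslo_serialization) touch files outside the yardstick/benchmark/core subtree shown in this patch. A rough sketch of the pattern they describe, with an illustrative package/resource path:

    import pkg_resources
    from oslo_serialization import jsonutils
    from oslo_utils import encodeutils

    # pkg_resources returns raw bytes on Python 3; decode before JSON encoding.
    raw_key = pkg_resources.resource_string("yardstick.resources",      # illustrative
                                            "files/yardstick_key.pub")  # resource path
    pub_key = encodeutils.safe_decode(raw_key, "utf-8")

    # dump_as_bytes() always yields bytes, suitable for request bodies on 2 and 3.
    body = jsonutils.dump_as_bytes({"key_name": "yardstick", "public_key": pub_key})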
--
cgit 1.2.3-korg
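The unit-test items in the commit message (unittest.mock on Python 3, mock 2.0.0 on Python 2, fixing the open() mock for vsperf) also live outside the subtree shown above. The import fallback and file-mock pattern they rely on is sketched below; the patch target and sample data are illustrative:

    try:
        from unittest import mock   # Python 3: mock is in the standard library
    except ImportError:
        import mock                 # Python 2: external mock package (>= 2.0.0)

    # mock_open() stands in for the builtin open(), e.g. when code reads a config file.
    with mock.patch("six.moves.builtins.open", mock.mock_open(read_data="cpu_count: 4")):
        with open("any/path.yaml") as handle:
            assert handle.read() == "cpu_count: 4"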