 func/args_handler.py       | 11
 func/cli.py                |  8
 func/driver.py             | 15
 tests/args_handler_test.py |  6
 tests/cli_test.py          | 13
 tests/driver_test.py       | 21
 6 files changed, 43 insertions(+), 31 deletions(-)
diff --git a/func/args_handler.py b/func/args_handler.py
index f2726eb8..57ecfcbd 100644
--- a/func/args_handler.py
+++ b/func/args_handler.py
@@ -48,12 +48,15 @@ def prepare_ansible_env(benchmark_test_case):
     return benchmark, benchmark_details, proxy_info, env_setup
 
 
-def run_benchmark(benchmark, benchmark_details, proxy_info, env_setup, benchmark_test_case):
+def run_benchmark(installer_type, pwd, benchmark, benchmark_details,
+                  proxy_info, env_setup, benchmark_test_case):
     driver = Driver()
-    driver.drive_bench(benchmark, env_setup.roles_dict.items(), _get_f_name(benchmark_test_case),
+    driver.drive_bench(installer_type, pwd, benchmark,
+                       env_setup.roles_dict.items(), _get_f_name(benchmark_test_case),
                        benchmark_details, env_setup.ip_pw_dict.items(), proxy_info)
 
 
-def prepare_and_run_benchmark(benchmark_test_case):
+def prepare_and_run_benchmark(installer_type, pwd, benchmark_test_case):
     benchmark, benchmark_details, proxy_info, env_setup = prepare_ansible_env(benchmark_test_case)
-    run_benchmark(benchmark, benchmark_details, proxy_info, env_setup, benchmark_test_case)
+    run_benchmark(installer_type, pwd, benchmark, benchmark_details,
+                  proxy_info, env_setup, benchmark_test_case)
diff --git a/func/cli.py b/func/cli.py
index 01694a9b..66ab2277 100644
--- a/func/cli.py
+++ b/func/cli.py
@@ -8,6 +8,7 @@
 ##############################################################################
 
 import sys
+import os
 import args_handler
 import argparse
 
@@ -37,12 +38,12 @@ class cli:
         args = self._parse_args(args)
         if not args_handler.check_suit_in_test_list(args.file):
-            print '\n\n ERROR: Test File Does not exist in test_list/ please enter correct file \n\n'
+            print('\n\n ERROR: Test File Does not exist in test_list/ please enter correct file \n\n')
             sys.exit(0)
 
         if not args_handler.check_lab_name(args.lab):
-            print '\n\n You have specified a lab that is not present in test_cases/ please enter \
-                correct file. If unsure how to proceed, use -l default.\n\n'
+            print('\n\n You have specified a lab that is not present in test_cases/ please enter \
+                correct file. If unsure how to proceed, use -l default.\n\n')
             sys.exit(0)
 
         suite = args.file
         benchmarks = args_handler.get_files_in_test_list(suite)
@@ -50,6 +51,7 @@ class cli:
         benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
 
         map(lambda x: args_handler.prepare_and_run_benchmark(
+            os.environ['INSTALLER_TYPE'], os.environ['PWD'],
            args_handler.get_benchmark_path(args.lab.lower(), suite, x)), benchmarks_list)
 
         print('{0} is not a Template in the Directory Enter a Valid file name.'
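Taken together, the args_handler.py and cli.py hunks move the os.environ lookups up to the CLI entry point and thread the values down as plain arguments. A minimal sketch of the resulting call flow, assuming the hypothetical helper run_suite and the illustrative values below (they are not part of the patch):

    import os
    import func.args_handler as args_handler

    def run_suite(lab, suite, benchmarks_list):
        # Read the environment once at the entry point ...
        installer_type = os.environ['INSTALLER_TYPE']   # e.g. 'fuel'
        pwd = os.environ['PWD']
        # ... and pass the values down instead of re-reading os.environ
        # inside Driver.
        for benchmark in benchmarks_list:
            args_handler.prepare_and_run_benchmark(
                installer_type, pwd,
                args_handler.get_benchmark_path(lab, suite, benchmark))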
diff --git a/func/driver.py b/func/driver.py
index 4ce402a4..859e7f34 100644
--- a/func/driver.py
+++ b/func/driver.py
@@ -6,7 +6,6 @@
 # which accompanies this distribution, and is available at
 # http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
-import os
 import logging
 from func.ansible_api import AnsibleApi
 
@@ -30,14 +29,15 @@ class Driver:
             z.update(y)
         return z
 
-    def get_common_var_json(self, benchmark_fname, benchmark_detail, pip_dict, proxy_info):
+    def get_common_var_json(self, installer_type, pwd, benchmark_fname,
+                            benchmark_detail, pip_dict, proxy_info):
         common_json = {'Dest_dir': 'results',
                        'ip1': '',
                        'ip2': '',
-                       'installer': str(os.environ['INSTALLER_TYPE']),
-                       'workingdir': str(os.environ['PWD']),
+                       'installer': str(installer_type),
+                       'workingdir': str(pwd),
                        'fname': str(benchmark_fname),
-                       'username': self.installer_username[str(os.environ['INSTALLER_TYPE'])]}
+                       'username': self.installer_username[str(installer_type)]}
         common_json.update(benchmark_detail) if benchmark_detail else None
         common_json.update(proxy_info) if proxy_info else None
         return common_json
@@ -61,11 +61,12 @@ class Driver:
                                      './data/QtipKey', extra_vars)
         return ansible_api.get_detail_playbook_stats()
 
-    def drive_bench(self, benchmark, roles, benchmark_fname,
+    def drive_bench(self, installer_type, pwd, benchmark, roles, benchmark_fname,
                     benchmark_detail=None, pip_dict=None, proxy_info=None):
         roles = sorted(roles)
         pip_dict = sorted(pip_dict)
-        var_json = self.get_common_var_json(benchmark_fname, benchmark_detail, pip_dict, proxy_info)
+        var_json = self.get_common_var_json(installer_type, pwd, benchmark_fname,
+                                            benchmark_detail, pip_dict, proxy_info)
         map(lambda role: self.run_ansible_playbook
             (benchmark, self.merge_two_dicts(var_json,
                                              self.get_special_var_json(role, roles,
diff --git a/tests/args_handler_test.py b/tests/args_handler_test.py
index 7f977f21..ebf500f3 100644
--- a/tests/args_handler_test.py
+++ b/tests/args_handler_test.py
@@ -13,8 +13,8 @@ import func.args_handler
 
 class TestClass:
     @pytest.mark.parametrize("test_input, expected", [
-        ('./test_cases/zte-pod1/network/iperf_bm.yaml',
-         ["iperf",
+        (['fuel', '/home', './test_cases/zte-pod1/network/iperf_bm.yaml'],
+         ['fuel', '/home', "iperf",
          [('1-server', ['10.20.0.23']), ('2-host', ['10.20.0.24'])],
          "iperf_bm.yaml",
          [('duration', 20), ('protocol', 'tcp'), ('bandwidthGbps', 10)],
@@ -29,7 +29,7 @@ class TestClass:
                               mock_env_setup_ssh, mock_update_ansible, test_input, expected):
         mock_ips = mock.Mock(return_value=["10.20.0.23", "10.20.0.24"])
         func.args_handler.Env_setup.fetch_compute_ips = mock_ips
-        func.args_handler.prepare_and_run_benchmark(test_input)
+        func.args_handler.prepare_and_run_benchmark(test_input[0], test_input[1], test_input[2])
         call = mock_driver.call_args
         call_args, call_kwargs = call
         assert sorted(map(sorted, call_args)) == sorted(map(sorted, expected))
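Because get_common_var_json and drive_bench now take installer_type and pwd explicitly, the driver can be exercised without patching the process environment. A rough usage sketch; the values 'fuel' and '/home' are illustrative, mirroring the test fixtures rather than a real deployment:

    from func.driver import Driver

    driver = Driver()
    # No mock.patch.dict over os.environ is needed any more: the installer
    # type and working directory are ordinary positional arguments.
    common = driver.get_common_var_json('fuel', '/home', 'iperf_bm.yaml',
                                        benchmark_detail=None, pip_dict=None,
                                        proxy_info=None)
    assert common['installer'] == 'fuel'
    assert common['workingdir'] == '/home'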
diff --git a/tests/cli_test.py b/tests/cli_test.py
index f9861dee..fe05327d 100644
--- a/tests/cli_test.py
+++ b/tests/cli_test.py
@@ -1,5 +1,6 @@
 import pytest
 import mock
+import os
 from func.cli import cli
 
 
@@ -15,8 +16,11 @@ class TestClass:
                          'test'], "Test File Does not exist in test_list")
     ])
     def test_cli_error(self, capfd, test_input, expected):
+        k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel', 'PWD': '/home'})
         with pytest.raises(SystemExit):
+            k.start()
             cli(test_input)
+        k.stop()
         resout, reserr = capfd.readouterr()
         assert expected in resout
 
@@ -24,11 +28,14 @@
         (['-l',
          'zte-pod1',
          '-f',
-         'storage'], [('./test_cases/zte-pod1/storage/fio_bm.yaml'),
-                      ('./test_cases/zte-pod1/storage/fio_vm.yaml')])
+         'storage'], [('fuel', '/home', './test_cases/zte-pod1/storage/fio_bm.yaml'),
+                      ('fuel', '/home', './test_cases/zte-pod1/storage/fio_vm.yaml')])
     ])
     @mock.patch('func.cli.args_handler.prepare_and_run_benchmark')
     def test_cli_successful(self, mock_args_handler, test_input, expected):
+        k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel', 'PWD': '/home'})
+        k.start()
         cli(test_input)
-        call_list = map(lambda x: mock_args_handler.call_args_list[x][0][0], range(len(expected)))
+        k.stop()
+        call_list = map(lambda x: mock_args_handler.call_args_list[x][0], range(len(expected)))
         assert sorted(call_list) == sorted(expected)
diff --git a/tests/driver_test.py b/tests/driver_test.py
index 9517d26d..71f01b0e 100644
--- a/tests/driver_test.py
+++ b/tests/driver_test.py
@@ -1,20 +1,20 @@
 import pytest
 import mock
-import os
 from func.driver import Driver
 
 
 class TestClass:
     @pytest.mark.parametrize("test_input, expected", [
-        (["iperf",
+        (['fuel',
+          '/home',
+          "iperf",
          [('host', ['10.20.0.13', '10.20.0.15'])],
          "iperf_bm.yaml",
          [('duration', 20), ('protocol', 'tcp'), ('bandwidthGbps', 0)],
          [("10.20.0.13", [None]), ("10.20.0.15", [None])],
          {'http_proxy': 'http://10.20.0.1:8118',
           'https_proxy': 'http://10.20.0.1:8118',
-          'no_proxy': 'localhost,127.0.0.1,10.20.*,192.168.*'},
-         'fuel'],
+          'no_proxy': 'localhost,127.0.0.1,10.20.*,192.168.*'}],
         [{'Dest_dir': 'results',
           'ip1': '',
           'ip2': '',
@@ -29,13 +29,14 @@ class TestClass:
           'protocol': 'tcp',
           'bandwidthGbps': 0,
           "role": "host"}]),
-        (["iperf",
+        (['joid',
+          '/home',
+          "iperf",
          [('1-server', ['10.20.0.13']), ('2-host', ['10.20.0.15'])],
          "iperf_vm.yaml",
          [('duration', 20), ('protocol', 'tcp'), ('bandwidthGbps', 0)],
          [("10.20.0.13", [None]), ("10.20.0.15", [None])],
-         {},
-         'joid'],
+          {}],
         [{'Dest_dir': 'results',
          'ip1': '10.20.0.13',
          'ip2': '',
@@ -63,12 +64,10 @@ class TestClass:
     @mock.patch('func.driver.AnsibleApi')
     def test_driver_success(self, mock_ansible, test_input, expected):
         mock_ansible.execute_playbook.return_value = True
-        k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': test_input[6], 'PWD': '/home'})
-        k.start()
         dri = Driver()
-        dri.drive_bench(test_input[0], test_input[1], test_input[2], test_input[3], test_input[4], test_input[5])
+        dri.drive_bench(test_input[0], test_input[1], test_input[2], test_input[3],
+                        test_input[4], test_input[5], test_input[6], test_input[7])
         call_list = mock_ansible.execute_playbook.call_args_list
-        k.stop()
         for call in call_list:
             call_args, call_kwargs = call
             real_call = call_args[3]
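The updated cli tests pin INSTALLER_TYPE and PWD with explicit k.start()/k.stop() calls around cli(). As a side note, mock.patch.dict also works as a context manager, which keeps the patch scoped even if the call under test raises; a small sketch of that alternative pattern (not part of this change):

    import os
    import mock  # the mock backport used here; unittest.mock on Python 3

    with mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel', 'PWD': '/home'}):
        # Code under test runs with the patched environment ...
        assert os.environ['INSTALLER_TYPE'] == 'fuel'
    # ... and the original os.environ is restored here, even on an exception.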