author: zhihui wu <wu.zhihui1@zte.com.cn> 2017-09-18 13:46:43 +0800
committer: zhihui wu <wu.zhihui1@zte.com.cn> 2017-09-18 13:46:43 +0800
commit: 5d67fdf522b7ba460f8337e3c6cfea3b340c46f8 (patch)
tree: 23ebd4b11ccc15e49242fc13803897ebdc6e28d5 /legacy
parent: 25086394f9ebd7bd90698ea54734f27ee9d4517e (diff)
delete legacy code from branch stable/euphrates
This directory doesn't need to be published in euphrates.

Change-Id: I2d88b85c272ca3df6cc504e08968e9432e2dd96d
Signed-off-by: zhihui wu <wu.zhihui1@zte.com.cn>
Diffstat (limited to 'legacy')
-rw-r--r--  legacy/DO-NOT-DELETE | 2
-rw-r--r--  legacy/__init__.py | 8
-rw-r--r--  legacy/api/__init__.py | 0
-rw-r--r--  legacy/api/cmd/__init__.py | 0
-rw-r--r--  legacy/api/cmd/server.py | 31
-rw-r--r--  legacy/api/handler/__init__.py | 0
-rw-r--r--  legacy/api/handler/db.py | 98
-rw-r--r--  legacy/api/handler/job_handler.py | 174
-rw-r--r--  legacy/api/handler/result_handler.py | 58
-rw-r--r--  legacy/api/model/__init__.py | 0
-rw-r--r--  legacy/api/model/job_model.py | 33
-rw-r--r--  legacy/api/router/__init__.py | 0
-rw-r--r--  legacy/api/router/mapper.py | 15
-rw-r--r--  legacy/api/router/mapper.py.orig | 19
-rw-r--r--  legacy/assets/perftest/common/git_proxy_pbook.yaml | 19
-rw-r--r--  legacy/assets/perftest/common/sys_proxy_pbook.yaml | 61
-rw-r--r--  legacy/assets/perftest/etc/fio_test_job | 13
-rw-r--r--  legacy/assets/perftest/etc/info_collect.py | 94
-rw-r--r--  legacy/assets/perftest/fio.yaml | 120
-rw-r--r--  legacy/assets/perftest/iperf.yaml | 170
-rw-r--r--  legacy/assets/perftest/summary | 23
-rw-r--r--  legacy/assets/testplan/default/network/iperf_bm.yaml | 58
-rw-r--r--  legacy/assets/testplan/default/network/iperf_vm.yaml | 51
-rw-r--r--  legacy/assets/testplan/default/network/iperf_vm_2.yaml | 52
-rw-r--r--  legacy/assets/testplan/default/storage/fio_bm.yaml | 47
-rw-r--r--  legacy/assets/testplan/default/storage/fio_vm.yaml | 52
-rw-r--r--  legacy/cli/helper.py | 14
-rw-r--r--  legacy/config/SampleHeat.yaml | 74
-rw-r--r--  legacy/docker/README.md | 11
-rw-r--r--  legacy/docker/cleanup_qtip_image.sh | 24
-rw-r--r--  legacy/docker/prepare_qtip_image.sh | 49
-rwxr-xr-x  legacy/docker/push_db.sh | 3
-rwxr-xr-x  legacy/docker/run_qtip.sh | 39
-rw-r--r--  legacy/docs/_01-compute.rst | 104
-rw-r--r--  legacy/docs/_02-network.rst | 61
-rw-r--r--  legacy/docs/_03-storage.rst | 31
-rw-r--r--  legacy/docs/_testcase_description.rst | 46
-rw-r--r--  legacy/docs/annex.rst | 18
-rw-r--r--  legacy/docs/apidocs/qtip_restful_api.rst | 10
-rw-r--r--  legacy/docs/benchmark-suites.rst | 15
-rw-r--r--  legacy/docs/download/sample_config.yaml | 58
-rw-r--r--  legacy/docs/index.rst | 13
-rw-r--r--  legacy/docs/introduction.rst | 381
-rw-r--r--  legacy/docs/overview/index.rst | 14
-rw-r--r--  legacy/docs/overview/overview.rst | 21
-rw-r--r--  legacy/driver/playbook/bwn_ng.yaml | 25
-rw-r--r--  legacy/driver/playbook/top.yaml | 12
-rw-r--r--  legacy/scripts/__init__.py | 0
-rw-r--r--  legacy/scripts/ref_results/__init__.py | 0
-rw-r--r--  legacy/scripts/ref_results/compute_benchmarks_indices.py | 168
-rw-r--r--  legacy/scripts/ref_results/index_calculation.py | 49
-rw-r--r--  legacy/scripts/ref_results/network_benchmarks_indices.py | 28
-rw-r--r--  legacy/scripts/ref_results/reference.json | 97
-rw-r--r--  legacy/scripts/ref_results/result_accum.py | 39
-rw-r--r--  legacy/scripts/ref_results/storage_benchmarks_indices.py | 37
-rw-r--r--  legacy/scripts/ref_results/suite_result.py | 58
-rw-r--r--  legacy/tests/__init__.py | 0
-rw-r--r--  legacy/tests/api/__init__.py | 0
-rw-r--r--  legacy/tests/api/test_server.py | 131
-rw-r--r--  legacy/tests/create_zones_test.py | 118
-rw-r--r--  legacy/tests/functional/__init__.py | 0
-rw-r--r--  legacy/tests/functional/yaml_schema_test.py | 24
-rw-r--r--  legacy/tests/helper/perftest.yaml | 13
-rw-r--r--  legacy/tests/helper/suite.yaml | 14
-rw-r--r--  legacy/tests/helper/version.yaml | 20
-rw-r--r--  legacy/tests/spawn_vm_test.py | 64
-rw-r--r--  legacy/utils/__init__.py | 0
-rw-r--r--  legacy/utils/create_zones.py | 86
-rw-r--r--  legacy/utils/dashboard/__init__.py | 0
-rw-r--r--  legacy/utils/dashboard/pushtoDB.py | 82
-rw-r--r--  legacy/utils/report/__init__.py | 0
-rw-r--r--  legacy/utils/report/get_indices.py | 16
-rw-r--r--  legacy/utils/report/get_results.py | 58
-rw-r--r--  legacy/utils/report/qtip_graph.py | 38
-rw-r--r--  legacy/utils/report/qtip_report.py | 117
-rw-r--r--  legacy/utils/spawn_vm.py | 206
-rw-r--r--  legacy/utils/transform/__init__.py | 0
-rw-r--r--  legacy/utils/transform/fio_transform.py | 37
-rw-r--r--  legacy/utils/transform/iperf_transform.py | 35
79 files changed, 0 insertions, 3756 deletions
diff --git a/legacy/DO-NOT-DELETE b/legacy/DO-NOT-DELETE
deleted file mode 100644
index fdecaad1..00000000
--- a/legacy/DO-NOT-DELETE
+++ /dev/null
@@ -1,2 +0,0 @@
-The legacy code is no longer maintained, but it should be kept until we finish
-the migration to the new architecture.
diff --git a/legacy/__init__.py b/legacy/__init__.py
deleted file mode 100644
index 48893ae6..00000000
--- a/legacy/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
diff --git a/legacy/api/__init__.py b/legacy/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/api/__init__.py
+++ /dev/null
diff --git a/legacy/api/cmd/__init__.py b/legacy/api/cmd/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/api/cmd/__init__.py
+++ /dev/null
diff --git a/legacy/api/cmd/server.py b/legacy/api/cmd/server.py
deleted file mode 100644
index eea45ad3..00000000
--- a/legacy/api/cmd/server.py
+++ /dev/null
@@ -1,31 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-from flask import Flask
-from flask_restful import Api
-from flask_restful_swagger import swagger
-
-import legacy.api.router.mapper as mapper
-
-app = Flask(__name__)
-api = swagger.docs(Api(app), apiVersion='0.1', description='QTIP API specs')
-
-
-def add_routers():
- for (handler, url) in mapper.mappers:
- api.add_resource(handler, url)
-
-
-def main():
- add_routers()
- app.run(host='0.0.0.0')
-
-
-if __name__ == "__main__":
- main()
diff --git a/legacy/api/handler/__init__.py b/legacy/api/handler/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/api/handler/__init__.py
+++ /dev/null
diff --git a/legacy/api/handler/db.py b/legacy/api/handler/db.py
deleted file mode 100644
index 24fc27a5..00000000
--- a/legacy/api/handler/db.py
+++ /dev/null
@@ -1,98 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from datetime import datetime
-from operator import add
-import uuid
-
-jobs = {}
-threads = {}
-
-
-def create_job(args):
- if len(filter(lambda x: jobs[x]['state'] == 'processing', jobs.keys())) > 0:
- return None
- else:
- job = {'job_id': str(uuid.uuid4()),
- 'installer_type': args["installer_type"],
- 'installer_ip': args["installer_ip"],
- 'pod_name': args["pod_name"],
- 'suite_name': args["suite_name"],
- 'max_minutes': args["max_minutes"],
- 'type': args["type"],
- 'testdb_url': args["testdb_url"],
- 'node_name': args["node_name"],
- 'start_time': str(datetime.now()),
- 'end_time': None,
- 'state': 'processing',
- 'state_detail': [],
- 'result': None,
- 'result_detail': []}
- jobs[job['job_id']] = job
- return job['job_id']
-
-
-def delete_job(job_id):
- if job_id in threads:
- stop_thread(job_id)
- if job_id in jobs:
- jobs[job_id]['end_time'] = str(datetime.now())
- jobs[job_id]['state'] = 'terminated'
- return True
- else:
- return False
-
-
-def get_job_info(job_id):
- if job_id in jobs:
- return jobs[job_id]
- else:
- return None
-
-
-def finish_job(job_id):
- jobs[job_id]['end_time'] = str(datetime.now())
- jobs[job_id]['state'] = 'finished'
- jobs[job_id]['result'] = reduce(add, map(lambda x: x['result'],
- jobs[job_id]['result_detail']))
- del threads[job_id]
-
-
-def update_job_state_detail(job_id, state_detail):
- jobs[job_id]['state_detail'] = state_detail
-
-
-def update_job_result_detail(job_id, benchmark, result):
- result['benchmark'] = benchmark
- jobs[job_id]['result_detail'].append(result)
-
-
-def is_job_timeout(job_id):
- period = datetime.now() - datetime.strptime(jobs[job_id]['start_time'],
- "%Y-%m-%d %H:%M:%S.%f")
- return True if jobs[job_id]['max_minutes'] * 60 < period.total_seconds()\
- else False
-
-
-def start_thread(job_id, thread, thread_stop):
- threads[job_id] = {'thread': thread,
- 'thread_stop': thread_stop}
- thread.start()
-
-
-def stop_thread(job_id):
- if threads[job_id]['thread'].isAlive():
- threads[job_id]['thread_stop'].set()
- threads[job_id]['thread'].join()
- if job_id in threads:
- del threads[job_id]
-
-
-def update_benchmark_state(job_id, benchmark, benchmark_state):
- filter(lambda x: x["benchmark"] == benchmark,
- get_job_info(job_id)["state_detail"])[0]['state'] = benchmark_state
diff --git a/legacy/api/handler/job_handler.py b/legacy/api/handler/job_handler.py
deleted file mode 100644
index 4ecc1cee..00000000
--- a/legacy/api/handler/job_handler.py
+++ /dev/null
@@ -1,174 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import threading
-from copy import copy
-
-from flask_restful import Resource, reqparse
-from flask_restful_swagger import swagger
-from qtip.api.model.job_model import JobResponseModel
-from qtip.utils import args_handler as args_handler
-from werkzeug.exceptions import abort
-
-from legacy.api.handler import db, result_handler
-
-
-class Job(Resource):
- @swagger.operation(
- notes='get a job by ID',
- nickname='get',
- parameters=[],
- responseMessages=[
- {
- "code": 200,
- "message": "Job detail info."
- },
- {
- "code": 404,
- "message": "Can't not find the job id XXXXXXX"
- }
- ]
- )
- def get(self, id):
- ret = db.get_job_info(id)
- return ret if ret else abort(404, "Can not find the job id %s" % id)
-
- @swagger.operation(
- notes='delete a job by ID',
- nickname='delete',
- parameters=[],
- responseMessages=[
- {
- "code": 200,
- "message": "Delete successfully"
- },
- {
- "code": 404,
- "message": "Can not find job_id XXXXXXXXX"
- }
- ]
- )
- def delete(self, id):
- ret = db.delete_job(id)
- return {'result': "Delete successfully"} if ret else abort(404, "Can not find job_id %s" % id)
-
-
-class JobList(Resource):
- @swagger.operation(
- notes='create a job with parameters',
- nickname='create',
- parameters=[
- {
- "name": "body",
- "description": """
-"installer_type": The installer type, for example fuel, compass..,
-
-"installer_ip": The installer ip of the pod,
-
-"max_minutes": If specified, the maximum duration in minutes
-for any single test iteration, default is '60',
-
-"pod_name": If specified, the Pod name, default is 'default',
-
-"suite_name": If specified, Test suite name, for example 'compute', 'network', 'storage',
-default is 'compute',
-
-"type": BM or VM,default is 'BM',
-
-"benchmark_name": If specified, benchmark name in suite, for example 'dhrystone_bm.yaml',
-default is all benchmarks in suite with specified type,
-
-"testdb_url": test db http url, for example 'http://testresults.opnfv.org/test/api/v1',
-
-"node_name": node name reported to test db
- """,
- "required": True,
- "type": "JobModel",
- "paramType": "body"
- }
- ],
- type=JobResponseModel.__name__,
- responseMessages=[
- {
- "code": 200,
- "message": "Job submitted"
- },
- {
- "code": 400,
- "message": "Missing configuration data"
- },
- {
- "code": 409,
- "message": "It already has one job running now!"
- }
- ]
- )
- def post(self):
- parser = reqparse.RequestParser()
- parser.add_argument('installer_type', type=str, required=True, help='installer_type is required')
- parser.add_argument('installer_ip', type=str, required=True, help='installer_ip is required')
- parser.add_argument('max_minutes', type=int, required=False, default=60, help='max_minutes should be integer')
- parser.add_argument('pod_name', type=str, required=False, default='default', help='pod_name should be string')
- parser.add_argument('suite_name', type=str, required=False, default='compute', help='suite_name should be string')
- parser.add_argument('type', type=str, required=False, default='BM', help='type should be BM, VM and ALL')
- parser.add_argument('benchmark_name', type=str, required=False, default='all', help='benchmark_name should be string')
- parser.add_argument('testdb_url', type=str, required=False, default=None,
- help='testdb_url should be test db http url,for example http://testresults.opnfv.org/test/api/v1')
- parser.add_argument('node_name', type=str, required=False, default=None, help='node_name should be string')
- args = parser.parse_args()
- if not args_handler.check_suite(args["suite_name"]):
- return abort(404, 'message:Test suite {0} does not exist under benchmarks/suite'.format(args["suite_name"]))
- if not args_handler.check_lab_name(args["pod_name"]):
- return abort(404, 'message: You have specified a lab {0}\
- that is not present in test_cases'.format(args['pod_name']))
-
- job_id = db.create_job(args)
- if not job_id:
- return abort(409, 'message:It already has one job running now!')
-
- benchmarks = args_handler.get_files_in_suite(args["suite_name"],
- args["type"].lower())
- test_cases = args_handler.get_files_in_test_plan(args["pod_name"],
- args["suite_name"],
- args["type"].lower())
- benchmarks_list = filter(lambda x: x in test_cases, benchmarks)
- if args["benchmark_name"] in benchmarks_list:
- benchmarks_list = [args["benchmark_name"]]
- if (args["benchmark_name"] is not 'all') and args["benchmark_name"] not in benchmarks_list:
- return abort(404, 'message: Benchmark name {0} does not exist in suit {1}'.format(args["benchmark_name"],
- args["suite_name"]))
- state_detail = map(lambda x: {'benchmark': x, 'state': 'idle'}, benchmarks_list)
- db.update_job_state_detail(job_id, copy(state_detail))
- thread_stop = threading.Event()
- post_thread = threading.Thread(target=self.thread_post, args=(args["installer_type"],
- benchmarks_list,
- args["pod_name"],
- args["suite_name"],
- job_id,
- args["testdb_url"],
- args["node_name"],
- thread_stop))
- db.start_thread(job_id, post_thread, thread_stop)
- return {'job_id': str(job_id)}
-
- def thread_post(self, installer_type, benchmarks_list, pod_name, suite_name,
- job_id, testdb_url, node_name, stop_event):
- for benchmark in benchmarks_list:
- if db.is_job_timeout(job_id) or stop_event.is_set():
- break
- db.update_benchmark_state(job_id, benchmark, 'processing')
- result = args_handler.prepare_and_run_benchmark(installer_type,
- '/home',
- args_handler.get_benchmark_path(pod_name,
- suite_name,
- benchmark))
- db.update_job_result_detail(job_id, benchmark, copy(result))
- db.update_benchmark_state(job_id, benchmark, 'finished')
- if (result_handler.dump_suite_result(suite_name) and testdb_url):
- result_handler.push_suite_result_to_db(suite_name, testdb_url, installer_type, node_name)
- db.finish_job(job_id)
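Note: the job API removed above (JobList/Job, mapped to /api/v1.0/jobs in legacy/api/router/mapper.py further down) accepted the parameters described in the swagger block of JobList.post(). As a rough sketch only, with made-up host, port and field values (not part of this commit), a job submission could have looked like this:

# Illustrative sketch: submit a job to the legacy QTIP REST API shown above.
# Assumes the server from legacy/api/cmd/server.py is running locally on
# Flask's default port (5000); host, port and values are examples.
import requests

payload = {
    "installer_type": "fuel",      # required
    "installer_ip": "10.20.0.2",   # required (example address)
    "suite_name": "compute",       # optional, defaults to 'compute'
    "type": "BM",                  # optional, defaults to 'BM'
    "max_minutes": 60,             # optional, defaults to 60
}

resp = requests.post("http://127.0.0.1:5000/api/v1.0/jobs", data=payload)
print(resp.json())                 # e.g. {"job_id": "<uuid>"}

# The same job could then be queried or terminated by id:
# requests.get("http://127.0.0.1:5000/api/v1.0/jobs/<job_id>")
# requests.delete("http://127.0.0.1:5000/api/v1.0/jobs/<job_id>")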
diff --git a/legacy/api/handler/result_handler.py b/legacy/api/handler/result_handler.py
deleted file mode 100644
index 3d1d592e..00000000
--- a/legacy/api/handler/result_handler.py
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import importlib
-import json
-from os.path import expanduser
-
-import qtip.utils.dashboard.pushtoDB as push_to_db
-from qtip.utils import logger_utils
-
-logger = logger_utils.QtipLogger('suite_result').get
-
-
-def get_benchmark_result(benchmark_name, suite_name):
- benchmark_indices = importlib.import_module('scripts.ref_results'
- '.{0}_benchmarks_indices'.format(suite_name))
- methodToCall = getattr(benchmark_indices, '{0}_index'.format(benchmark_name))
- return methodToCall()
-
-
-def dump_suite_result(suite_name):
- suite_dict = {}
- suite_bench_list = {'compute': ['DPI', 'Dhrystone', 'Whetstone', 'SSL', 'RamSpeed'],
- 'storage': ['FIO'],
- 'network': ['IPERF']}
- temp = 0
- l = len(suite_bench_list[suite_name])
- for benchmark in suite_bench_list[suite_name]:
- try:
- suite_dict[benchmark] = get_benchmark_result(benchmark.lower(), suite_name)
- temp = temp + float(suite_dict[benchmark]['index'])
- except OSError:
- l = l - 1
- pass
-
- if l == 0:
- logger.info("No {0} suite results found".format(suite_name))
- return False
- else:
- suite_index = temp / l
- suite_dict_f = {'index': suite_index,
- 'suite_results': suite_dict}
- result_path = expanduser('~') + '/qtip/results'
- with open('{0}/{1}_result.json'.format(result_path, suite_name), 'w+') as result_json:
- json.dump(suite_dict_f, result_json, indent=4, sort_keys=True)
- return True
-
-
-def push_suite_result_to_db(suite_name, test_db_url, installer_type, node_name):
- with open('results/{0}_result.json'.format(suite_name), 'r') as result_file:
- j = json.load(result_file)
- push_to_db.push_results_to_db(test_db_url, '{0}_test_suite'.format(suite_name),
- j, installer_type, node_name)
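Note: dump_suite_result() above derives the suite index as the plain arithmetic mean of the per-benchmark indices it managed to compute; benchmarks whose result files are missing are skipped and excluded from the divisor. A minimal sketch with made-up benchmark names and numbers:

# Sketch of the aggregation performed by dump_suite_result(); values are examples.
benchmark_indices = {"dhrystone": 0.95, "whetstone": 0.90, "ssl": 0.85}

suite_index = sum(benchmark_indices.values()) / len(benchmark_indices)
print(round(suite_index, 2))  # 0.9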
diff --git a/legacy/api/model/__init__.py b/legacy/api/model/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/api/model/__init__.py
+++ /dev/null
diff --git a/legacy/api/model/job_model.py b/legacy/api/model/job_model.py
deleted file mode 100644
index 73baf660..00000000
--- a/legacy/api/model/job_model.py
+++ /dev/null
@@ -1,33 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from flask_restful import fields
-from flask_restful_swagger import swagger
-
-
-@swagger.model
-class JobModel:
- resource_fields = {
- 'installer_type': fields.String,
- 'installer_ip': fields.String,
- 'max_minutes': fields.Integer,
- 'pod_name': fields.String,
- 'suite_name': fields.String,
- 'type': fields.String,
- 'benchmark_name': fields.String,
- 'testdb_url': fields.String,
- 'node_name': fields.String
- }
- required = ['installer_type', 'installer_ip']
-
-
-@swagger.model
-class JobResponseModel:
- resource_fields = {
- 'job_id': fields.String
- }
diff --git a/legacy/api/router/__init__.py b/legacy/api/router/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/api/router/__init__.py
+++ /dev/null
diff --git a/legacy/api/router/mapper.py b/legacy/api/router/mapper.py
deleted file mode 100644
index 470d18e2..00000000
--- a/legacy/api/router/mapper.py
+++ /dev/null
@@ -1,15 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from legacy.api.handler.job_handler import Job, JobList
-
-
-mappers = [
- (JobList, '/api/v1.0/jobs'),
- (Job, '/api/v1.0/jobs/<string:id>'),
-]
diff --git a/legacy/api/router/mapper.py.orig b/legacy/api/router/mapper.py.orig
deleted file mode 100644
index 1acb40b5..00000000
--- a/legacy/api/router/mapper.py.orig
+++ /dev/null
@@ -1,19 +0,0 @@
-<<<<<<< HEAD
-from legacy.api.handler.job_handler import Job, JobList
-=======
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from legacy.api.handler import Job, JobList
->>>>>>> 615b529... Add licence header according to OPNFV contribution guidelines[1] by script[2]
-
-
-mappers = [
- (JobList, '/api/v1.0/jobs'),
- (Job, '/api/v1.0/jobs/<string:id>'),
-]
diff --git a/legacy/assets/perftest/common/git_proxy_pbook.yaml b/legacy/assets/perftest/common/git_proxy_pbook.yaml
deleted file mode 100644
index e190162b..00000000
--- a/legacy/assets/perftest/common/git_proxy_pbook.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#git
-- name: set git proxy(http)
- shell: "git config --global http.proxy {{ http_proxy }}"
- when: http_proxy is defined
- ignore_errors: yes
-
-- name: set git proxy(https)
- shell: "git config --global https.proxy {{https_proxy}}"
- when: https_proxy is defined
- ignore_errors: yes
-
diff --git a/legacy/assets/perftest/common/sys_proxy_pbook.yaml b/legacy/assets/perftest/common/sys_proxy_pbook.yaml
deleted file mode 100644
index 543285e3..00000000
--- a/legacy/assets/perftest/common/sys_proxy_pbook.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#env
-- name: insert shell proxy http
- lineinfile: dest=/etc/profile.d/proxy.sh state=present create=yes owner=root group=root mode=0644 regexp="export http_proxy={{ http_proxy }}"
- insertafter=EOF line="export http_proxy={{ http_proxy }}"
- when: http_proxy is defined
- ignore_errors: yes
-
-- name: insert shell proxy https
- lineinfile: dest=/etc/profile.d/proxy.sh state=present create=yes owner=root group=root mode=0644 regexp="export https_proxy={{ https_proxy }}"
- insertafter=EOF line="export https_proxy={{ https_proxy }}"
- when: https_proxy is defined
- ignore_errors: yes
-
-- name: insert no proxy
- lineinfile: dest=/etc/profile.d/proxy.sh state=present create=yes owner=root group=root mode=0644 regexp="{{ no_proxy }}"
- insertafter=EOF line="export no_proxy={{ no_proxy }}"
- when: no_proxy is defined
- ignore_errors: yes
-
-#wget
-- name: insert wget proxy(http)
- lineinfile: dest=/etc/wgetrc state=present regexp="http_proxy={{ http_proxy }}"
- insertafter="^#http_proxy" line="http_proxy={{ http_proxy }}"
- when: http_proxy is defined
- ignore_errors: yes
-
-- name: insert wget proxy(https)
- lineinfile: dest=/etc/wgetrc state=present regexp="https_proxy={{ https_proxy }}"
- insertafter="^#https_proxy" line="https_proxy={{ https_proxy }}"
- when: https_proxy is defined
- ignore_errors: yes
-
-#yum
-- name: insert yum proxy(http)
- lineinfile: dest=/etc/yum.conf state=present regexp="proxy={{ http_proxy }}"
- insertafter=EOF line="proxy={{ http_proxy }}"
- when: ansible_os_family == "RedHat" and http_proxy is defined
- ignore_errors: yes
-
-#apt
-
-- name: insert apt proxy(http)
- lineinfile: dest=/etc/apt/apt.conf state=present create=yes regexp="Acquire::http::Proxy \"{{ http_proxy }}\";"
- insertafter=EOF line="Acquire::http::Proxy \"{{ http_proxy }}\";"
- when: ansible_os_family == "Debian" and http_proxy is defined
- ignore_errors: yes
-
-- name: insert apt proxy(https)
- lineinfile: dest=/etc/apt/apt.conf state=present create=yes regexp="Acquire::https::Proxy \"{{ https_proxy }}\";"
- insertafter=EOF line="Acquire::https::Proxy \"{{ https_proxy }}\";"
- when: ansible_os_family == "Debian" and https_proxy is defined
- ignore_errors: yes
-
diff --git a/legacy/assets/perftest/etc/fio_test_job b/legacy/assets/perftest/etc/fio_test_job
deleted file mode 100644
index 6817abca..00000000
--- a/legacy/assets/perftest/etc/fio_test_job
+++ /dev/null
@@ -1,13 +0,0 @@
-[global]
-
-runtime= 600
-ioengine=libaio
-iodepth=2
-direct=1
-bs=4k
-rw=randrw
-
-[job1]
-size=5G
-
-
diff --git a/legacy/assets/perftest/etc/info_collect.py b/legacy/assets/perftest/etc/info_collect.py
deleted file mode 100644
index 3dbe55c2..00000000
--- a/legacy/assets/perftest/etc/info_collect.py
+++ /dev/null
@@ -1,94 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import pickle
-import json
-import sys
-
-os.system('inxi -b -c0 -n > $PWD/est_2')
-est_ob = open("est_2", "r+")
-est_ob2 = open("est_1", "w+")
-in_string = est_ob.read().replace('\n', ' ')
-cpu_idle = float(os.popen("""top -bn1 | grep "Cpu(s)" | awk '{print $8}'""").read().rstrip())
-cpu_usage = 100 - cpu_idle
-est_ob2.write(in_string)
-est_ob.close()
-est_ob2.close()
-
-inxi_host = os.popen("""cat $PWD/est_1 | grep -o -P '(?<=Host:).*(?=Kernel)' """).read().lstrip().rstrip()
-inxi_mem = os.popen("""cat $PWD/est_1 | grep -o -P '(?<=Memory:).*(?=MB)' """).read().lstrip().rstrip() + "MB"
-inxi_cpu = os.popen("""cat $PWD/est_1 | grep -o -P '(?<=CPU).*(?=speed)' | cut -f2 -d':'""").read().lstrip().rstrip()
-inxi_distro = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Distro:).*(?=Machine:)' """).read().rstrip().lstrip()
-inxi_kernel = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Kernel:).*(?=Console:)' """).read().rstrip().lstrip()
-inxi_HD = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=HDD Total Size:).*(?=Info:)' """).read().rstrip().lstrip()
-inxi_product = os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=product:).*(?=Mobo:)' """).read().rstrip().lstrip()
-
-info_dict = {'hostname': inxi_host,
- 'product': inxi_product,
- 'os': inxi_distro,
- 'kernel': inxi_kernel,
- 'cpu': inxi_cpu,
- 'cpu_usage': '{0}%'.format(str(round(cpu_usage, 3))),
- 'memory_usage': inxi_mem,
- 'disk_usage': inxi_HD}
-network_flag = str(sys.argv[1]).rstrip()
-
-if (network_flag == 'n'):
-
- info_dict['network_interfaces'] = {}
- tem_2 = """ cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'"""
- print os.system(tem_2 + ' > Hello')
- i = int(os.popen(tem_2 + " | grep -o 'Card' | wc -l ").read())
- print i
-
- for x in range(1, i + 1):
- tem = """ cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Card-""" + str(x + 1) + """)'"""
- if i == 1:
- tem = """ cat $PWD/est_1 | grep -o -P '(?<=Network:).*(?=Info:)'"""
- inxi_card_1 = ((os.popen(tem + " | grep -o -P '(?<=Card:).*(?=Drives:)'|sed 's/ *driver:.*//'").read().rstrip().lstrip()))
- print inxi_card_1
- info_dict['network_interfaces']['interface_' + str(x)] = {}
- info_dict['network_interfaces']['interface_' + str(x)]['network_card'] = inxi_card_1
- inxi_card_2 = ((os.popen(tem + "| grep -o -P '(?<=Card:).*(?=Drives:)'|sed -e 's/^.*IF: //'").read())).rstrip().lstrip()
- info_dict['network_interfaces']['interface_' + str(x)]['interface_info'] = inxi_card_2
- elif x < (i):
- print "two"
- inxi_card_1 = ((os.popen(tem + "| sed 's/ *driver:.*//'").read().rstrip().lstrip()))
- info_dict['network_interfaces']['interface_' + str(x)] = {}
- info_dict['network_interfaces']['interface_' + str(x)]['network_Card'] = inxi_card_1
- inxi_card_2 = ((os.popen(tem + "|sed -e 's/^.*IF: //'").read())).rstrip().lstrip()
- info_dict['network_interfaces']['interface_' + str(x)]['interface_info'] = inxi_card_2
- elif x == i:
- print "Three"
- info_dict['network_interfaces']['interface_' + str(x)] = {}
- inxi_card_1 = ((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Drives:)'| sed 's/ *driver:.*//' """).read().rstrip().lstrip()))
- info_dict['network_interfaces']['interface_' + str(x)]['network_Card'] = inxi_card_1
- inxi_card_2 = ((os.popen(""" cat $PWD/est_1 | grep -o -P '(?<=Card-""" + str(x) + """:).*(?=Drives:)'| sed -e 's/^.*IF: //' """).read().rstrip().lstrip()))
- info_dict['network_interfaces']['interface_' + str(x)]['interface_info'] = inxi_card_2
- else:
- print "No network cards"
- os.system("bwm-ng -o plain -c 1 | grep -v '=' | grep -v 'iface' | grep -v '-' > bwm_dump")
- n_interface = int(os.popen(" cat bwm_dump | grep -v 'total' | wc -l ").read().rstrip())
- interface = {}
- for x in range(1, n_interface):
- interface_name = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $1}' ").read().rstrip().replace(':', '')
- interface[str(interface_name)] = {}
- interface[str(interface_name)]['Rx (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $2}' ").read().rstrip()
- interface[str(interface_name)]['Tx (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR==" + str(x) + "' | awk '{print $4}' ").read().rstrip()
- interface[str(interface_name)]['Total (KB/s)'] = os.popen(" cat bwm_dump | awk 'NR== " + str(x) + "' | awk '{print $6}' ").read().rstrip()
-
- info_dict['interface_io'] = interface
-
-print info_dict
-
-with open('./sys_info_temp', 'w+')as out_info:
- pickle.dump(info_dict, out_info)
-
-with open('temp', 'w+') as result_json:
- json.dump(info_dict, result_json, indent=4, sort_keys=True)
diff --git a/legacy/assets/perftest/fio.yaml b/legacy/assets/perftest/fio.yaml
deleted file mode 100644
index e6d1072d..00000000
--- a/legacy/assets/perftest/fio.yaml
+++ /dev/null
@@ -1,120 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
- - hosts: localhost
- connection: local
- gather_facts: no
-
- tasks:
- - name: making fio directory
- file: path={{Dest_dir}}/fio state=directory
-
- - name: making temporary fio directory
- file: path={{Dest_dir}}/fio/fio_temp state=directory
-
-
- - hosts: "{{role}}"
- become: yes
- remote_user: "{{username}}"
-
- tasks:
- - name: checking home directory
- shell: echo $HOME
- register: home_dir
-
- - name: cleaning fio directory
- file: path={{home_dir.stdout}}/fio state=absent
-
- - name: cleaning previous results
- file: path={{home_dir.stdout}}/qtip_result state=absent
-
- - name: making fio temporary directory
- file: path={{home_dir.stdout}}/fio state=directory
-
- - name: making results temporary directory
- file: path={{home_dir.stdout}}/qtip_result state=directory
-
- - include: ./common/sys_proxy_pbook.yaml
-
- - include: ./common/sys_info_pbook.yaml
- vars:
- network: false
-
- - name: Installing fio dependencies when CentOS
- shell: sudo yum install wget gcc libaio-devel -y
- when: ansible_os_family == "RedHat"
-
- - name: Installing fio dependencies when Ubuntu
- shell: sudo apt-get install wget gcc libaio-dev -y
- when: ansible_os_family == "Debian"
-
- - name: Fetching fio
- shell: cd $HOME/fio/ && wget http://freecode.com/urls/3aa21b8c106cab742bf1f20d60629e3f -O fio.tar.gz
-
- - name: Untar fio
- shell: cd $HOME/fio/ && sudo tar -zxvf fio.tar.gz
-
- - name: configure
- shell: cd $HOME/fio/fio-2.1.10 && sudo ./configure && sudo make
-
- - name: Fetching fio job
- copy: src=./etc/fio_test_job dest={{home_dir.stdout}}/fio/fio-2.1.10/
-
- - name: Benchmarking block storage through fio
- shell: cd $HOME/fio/fio-2.1.10 && sudo ./fio --output-format=json --output=$HOME/qtip_result/fio_result.json fio_test_job
-
- - name: Fetching result transformation script
- copy: src={{workingdir}}/qtip/utils/transform/fio_transform.py dest={{home_dir.stdout}}/qtip_result
-
- - name: Transforming result
- shell: cd $HOME/qtip_result && sudo python fio_transform.py
-
- - name: copy report formation script
- copy: src={{workingdir}}/qtip/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result
-
- - name: consolidating report
- shell: cd $HOME/qtip_result && sudo python final_report.py FIO {{fname}}
-
- - name: registering files
- shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
- register: files_to_copy
-
- - name: copy results
- fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{Dest_dir}}/fio/fio_temp
- with_items: "{{files_to_copy.stdout_lines}}"
-
- - name: registering log files
- shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
- register: copy_log_results
-
- - name: copying log results
- fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{Dest_dir}}/fio/fio_temp
- with_items: "{{copy_log_results.stdout_lines}}"
-
- - name: cleaning fio
- file: path={{home_dir.stdout}}/fio state=absent
-
- - name: cleaning_qtip_result
- file: path={{home_dir.stdout}}/qtip_result state=absent
-
- - hosts: localhost
- connection: local
- gather_facts: no
-
- tasks:
- - name: extracting_json
- shell: (find {{Dest_dir}}/fio/fio_temp/ -name "*.json" | xargs cp -t {{Dest_dir}}/fio/)
-
- - name: making_logs_folder
- file: path={{Dest_dir}}/fio/logs state=directory
-
- - name: extracting_log
- shell: (find {{Dest_dir}}/fio/fio_temp/ -name "*.log" | xargs cp -t {{Dest_dir}}/fio/logs)
-
- - name: removing fio_log
- file: path={{Dest_dir}}/fio/fio_temp state=absent
diff --git a/legacy/assets/perftest/iperf.yaml b/legacy/assets/perftest/iperf.yaml
deleted file mode 100644
index 6654c556..00000000
--- a/legacy/assets/perftest/iperf.yaml
+++ /dev/null
@@ -1,170 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
- - hosts: localhost
- connection: local
- gather_facts: no
-
- tasks:
- - name: making Iperf directory
- file: path={{Dest_dir}}/iperf state=directory
-
- - name: making temporary iperf directory
- file: path={{Dest_dir}}/iperf/iperf_temp state=directory
-
-
- - hosts: "{{role}}"
- become: yes
- remote_user: "{{username}}"
-
- tasks:
- - name: Rolename
- set_fact:
- rolename: "{{role}}"
- when: role is defined
-
- - name: installertype
- set_fact:
- installertype: "{{installer}}"
-
- - name: Get Hostname
- shell: echo $HOSTNAME
- register: hostID
-
- - name: echo
- shell: echo index_var
-
- - name: checking home directory
- shell: echo $HOME
- register: home_dir
-
- - name: cleaning iperf directory
- file: path={{home_dir.stdout}}/iperf state=absent
-
- - name: cleaning previous results
- file: path={{home_dir.stdout}}/qtip_result state=absent
-
- - name: making Iperf temporary directory
- file: path={{home_dir.stdout}}/iperf state=directory
-
- - name: making results temporary directory
- file: path={{home_dir.stdout}}/qtip_result state=directory
-
- - include: ./common/sys_proxy_pbook.yaml
-
- - include: ./common/sys_info_pbook.yaml
- vars:
- network: true
-
- - name: Installing Epel-release when CentOS
- shell: sudo yum install epel-release -y
- when: ansible_os_family == "RedHat"
-
- - name: Allow iperf server port in iptables input rules
- shell: iptables -A INPUT -p tcp --dport {{iperf_port}} -j ACCEPT
- vars:
- iperf_port: 5201
- ignore_errors: yes
- when: rolename == "1-server" and installertype == 'fuel'
-
- - name: Installing IPERF when Ubuntu
- shell: sudo apt-get install iperf3 -y
- when: ansible_os_family == "Debian"
-
- - name: Installing Iperf3
- shell: sudo yum install iperf3 -y
- when: ansible_os_family == "RedHat"
-
- - name: Running iperf on server
- shell: iperf3 -s
- async: 400
- poll: 0
- when: rolename == "1-server"
-
- - name: Running Iperf on Host
- shell: iperf3 --time {{duration}} -b 0 G -c {{ip1}} -J -O10 >> {{home_dir.stdout}}/qtip_result/iperf_raw.json
- ignore_errors: yes
- with_items:
- - "{{ip1}}"
- when: rolename == "2-host" and "{{privateip1}}" == "NONE"
-
- - name: Running Iperf on Host
- shell: iperf3 --time {{duration}} -b 0 G -c {{privateip1}} -J -O10 >> {{home_dir.stdout}}/qtip_result/iperf_raw.json
- ignore_errors: yes
- with_items:
- - "{{ip1}}"
- when: rolename == "2-host" and "{{privateip1}}" != "NONE"
-
- - name: Fetching result transformation script
- copy: src={{workingdir}}/qtip/utils/transform/iperf_transform.py dest={{home_dir.stdout}}/qtip_result
- - name: Transforming result
-
- shell: cd $HOME/qtip_result && sudo python iperf_transform.py
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: copy report formation script
- copy: src={{workingdir}}/qtip/utils/transform/final_report.py dest={{home_dir.stdout}}/qtip_result
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: consolidating report
- shell: cd $HOME/qtip_result && sudo python final_report.py IPERF {{fname}}
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: Files to Copy
- shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.json") | cut -d'/' -f2
- register: files_to_copy
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: copy results
- fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{Dest_dir}}/iperf/iperf_temp
- with_items: "{{files_to_copy.stdout_lines}}"
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: registering log files
- shell: (cd $HOME/qtip_result/; find . -maxdepth 1 -name "*.log") | cut -d'/' -f2
- register: copy_log_results
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: copying log results
- fetch: src={{home_dir.stdout}}/qtip_result/{{item}} dest={{Dest_dir}}/iperf/iperf_temp
- with_items: "{{copy_log_results.stdout_lines}}"
- when: rolename =="2-host" and "{{ip2}}" == ''
-
- - name: cleaning iperf directory
- file: path={{home_dir.stdout}}/iperf state=absent
-
- - name: cleaning previous results
- file: path={{home_dir.stdout}}/qtip_result state=absent
-
- - hosts: localhost
- connection: local
- gather_facts: no
-
- tasks:
- - name: Rolename
- set_fact:
- rolename: "{{role}}"
- when: role is defined
-
- - name: extracting_json
- shell: (find {{Dest_dir}}/iperf/iperf_temp/ -name "*.json" | xargs cp -t {{Dest_dir}}/iperf/)
- when: rolename == "2-host"
-
- - name: making_logs_folder
- file: path={{Dest_dir}}/iperf/logs state=directory
-
- - name: extracting_log
- shell: ( find {{Dest_dir}}/iperf/iperf_temp/ -name "*.log" | xargs cp -t {{Dest_dir}}/iperf/logs)
- when: rolename == "2-host"
-
- - name: removing iperf_raw file
- file: path={{Dest_dir}}/iperf/iperf_raw.json state=absent
- when: rolename == "2-host"
-
- - name: removing iperf_temp
- file: path={{Dest_dir}}/iperf/iperf_temp state=absent
diff --git a/legacy/assets/perftest/summary b/legacy/assets/perftest/summary
deleted file mode 100644
index 5891408c..00000000
--- a/legacy/assets/perftest/summary
+++ /dev/null
@@ -1,23 +0,0 @@
----
-
- test_cases:
- - name: fio
- description: Storage performance benchmark
-
- - name: iperf
- description: Measures the network throughput
-
- - name: dpi
- description: Traffic classification rate provides a measure for CPU performance
-
- - name: ssl
- description: CPU performance benchmark
-
- - name: dhrystone
- description: Evaluate CPU's integer operation performance
-
- - name: whetstone
- description: Evaluate CPU's floating point performance
-
- - name: ramspeed
- description: Measures the memory performance of a machine
diff --git a/legacy/assets/testplan/default/network/iperf_bm.yaml b/legacy/assets/testplan/default/network/iperf_bm.yaml
deleted file mode 100644
index 3b10a383..00000000
--- a/legacy/assets/testplan/default/network/iperf_bm.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-Scenario:
- benchmark: iperf
- topology: Client and Server on different baremetal Compute nodes
- server: machine_1
- client: machine_2
- benchmark_details:
- duration: 20
- protocol: tcp
- bandwidthGbps: 10
-
-Context:
- Host_Machines:
- machine_1:
- ip:
- pw:
- role: 1-server
- machine_2:
- ip:
- pw:
- role: 2-host
-
- Virtual_Machines:
-
-Test_Description:
- Test_category: "network"
- Benchmark: "iperf"
- Overview: >
- '''This test will run the IPERF benchmark on virtualmachine_1 and virtualmachine_2. On the\n
- same compute node
- if you wish to add a host machine add the following information under the Host_Machine tag
- virtualmachine_1:
- availability_zone: compute1
- OS_image: QTIP_CentOS
- public_network: 'net04_ext'
- role: 1-server
- flavor: m1.large
-
- virtualmachine_2:
- availability_zone: compute2
- OS_image: QTIP_CentOS
- public_network: 'net04_ext'
- role: 2-host
- flavor: m1.large
-
- machine_1:
- ip:
- pw:
- role:
- '''
-
diff --git a/legacy/assets/testplan/default/network/iperf_vm.yaml b/legacy/assets/testplan/default/network/iperf_vm.yaml
deleted file mode 100644
index e42dc0bb..00000000
--- a/legacy/assets/testplan/default/network/iperf_vm.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-Scenario:
- benchmark: iperf
- topology: Client and Server on ONE compute
- server : virtualmachine_1
- client: virtualmachine_2
- description: 'Leave the bandwidth as 0 to throttle maximum traffic'
- benchmark_details:
- duration: 20
- protocol: tcp
- bandwidthGbps: 0
-
-Context:
- Host_Machines:
-
- Virtual_Machines:
- virtualmachine_1:
- availability_zone: compute1
- OS_image: QTIP_CentOS
- public_network: 'net04_ext'
- role: 1-server
- flavor: m1.large
-
- virtualmachine_2:
- availability_zone: compute1
- OS_image: QTIP_CentOS
- public_network: 'net04_ext'
- role: 2-host
- flavor: m1.large
-
-Test_Description:
- Test_category: "network"
- Benchmark: "iperf"
- Overview: >
- '''This test will run the IPERF benchmark on virtualmachine_1 and virtualmachine_2. On the\n
- same compute node
- if you wish to add a host machine add the following information under the Host_Machine tag
-
- machine_1:
- ip:
- pw:
- role:
- '''
-
diff --git a/legacy/assets/testplan/default/network/iperf_vm_2.yaml b/legacy/assets/testplan/default/network/iperf_vm_2.yaml
deleted file mode 100644
index 8a1d1a0d..00000000
--- a/legacy/assets/testplan/default/network/iperf_vm_2.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-Scenario:
- benchmark: iperf
- topology: Client and Server on two different compute nodes
- server : virtualmachine_1
- client: virtualmachine_2
- description: 'Leave the bandwidth as 0 to throttle maximum traffic'
- benchmark_details:
- duration: 20
- protocol: tcp
- bandwidthGbps: 0
-
-Context:
- Host_Machines:
-
-
- Virtual_Machines:
- virtualmachine_1:
- availability_zone: compute1
- OS_image: QTIP_CentOS
- public_network: 'net04_ext'
- role: 1-server
- flavor: m1.large
-
- virtualmachine_2:
- availability_zone: compute2
- OS_image: QTIP_CentOS
- public_network: 'net04_ext'
- role: 2-host
- flavor: m1.large
-
-Test_Description:
- Test_category: "network"
- Benchmark: "iperf"
- Overview: >
- '''This test will run the IPERF benchmark on virtualmachine_1 and virtualmachine_2. On the\n
- same compute node
- if you wish to add a host machine add the following information under the Host_Machine tag
-
- machine_1:
- ip:
- pw:
- role:
- '''
-
diff --git a/legacy/assets/testplan/default/storage/fio_bm.yaml b/legacy/assets/testplan/default/storage/fio_bm.yaml
deleted file mode 100644
index d226b1af..00000000
--- a/legacy/assets/testplan/default/storage/fio_bm.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-Scenario:
- benchmark: fio
- host: machine_1, machine_2
- server:
-
-Context:
- Host_Machines:
- machine_1:
- ip:
- pw:
- role: host
- machine_2:
- ip:
- pw:
- role: host
-
-
- Virtual_Machines:
-
-
-Test_Description:
- Test_category: "Storage"
- Benchmark: "FIO"
- Overview: >
- '''This test will run the FIO benchmark in parallel on host machines "machine_1" and "machine_2".\n
- The fio job specifications can be found in qtip/benchmarks/fio_jobs/test_job.
- The job consists of an fio load of:
- 1.50% rand read 50% rand write
- 2.Asynch engine
- 3.Direct IO.
- 4.Queueing depth of 2
-
- if you wish to add another machine add the following information under the Host_Machines tag
- machine_3:
- ip: 172.18.0.16
- pw: Op3nStack
- role: host
- '''
-
diff --git a/legacy/assets/testplan/default/storage/fio_vm.yaml b/legacy/assets/testplan/default/storage/fio_vm.yaml
deleted file mode 100644
index b1cf3142..00000000
--- a/legacy/assets/testplan/default/storage/fio_vm.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-Scenario:
- benchmark: fio
- host: virtualmachine_1, virtualmachine_2
- server:
-
-Context:
- Host_Machines:
-
- Virtual_Machines:
- virtualmachine_1:
- availability_zone: compute1
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: host
- virtualmachine_2:
- availability_zone: compute2
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: host
-
-Test_Description:
- Test_category: "Storage"
- Benchmark: "FIO"
- Overview: >
- '''This test will run the FIO benchmark in parallel on virtualmachine_1 and virtualmachine_2.\n
- The fio job specifications can be found in qtip/benchmarks/fio_jobs/test_job.
- The job consists of an fio load of:
- 1.50% rand read 50% rand write
- 2.Asynch engine
- 3.Direct IO.
- 4.Queueing depth of 2
-
- if you wish to add a virtual machine add the following information under the Virtual_Machine tag
-
- virtualmachine_3:
- availability_zone:
- public_network:
- OS_image:
- flavor:
- role:
- '''
-
diff --git a/legacy/cli/helper.py b/legacy/cli/helper.py
deleted file mode 100644
index acfecf8d..00000000
--- a/legacy/cli/helper.py
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-import os
-
-
-def fetch_root():
- return os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'benchmarks/')
diff --git a/legacy/config/SampleHeat.yaml b/legacy/config/SampleHeat.yaml
deleted file mode 100644
index 650c6a0c..00000000
--- a/legacy/config/SampleHeat.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-heat_template_version: 2015-04-30
-
-description: >
- Used to run VMs for QTIP
-
-parameters:
- image:
- type: string
- description: Name of the image
- default: QTIP_CentOS
-
- external_net_name:
- type: string
- description: Name of the external network which management network will connect to
- default: admin_floating_net
-
-resources:
- flavor:
- type: OS::Nova::Flavor
- properties:
- ram: 8192
- vcpus: 8
- disk: 80
-
- network:
- type: OS::Neutron::Net
- properties:
- name: qtip_net
-
- subnet:
- type: OS::Neutron::Subnet
- properties:
- name: qtip_subnet
- ip_version: 4
- cidr: 192.168.0.0/24
- network: { get_resource: network }
- dns_nameservers: [8.8.8.8]
-
- management_router:
- type: OS::Neutron::Router
- properties:
- name: qtip_router
- external_gateway_info:
- network: { get_param: external_net_name }
-
- management_router_interface:
- type: OS::Neutron::RouterInterface
- properties:
- router: { get_resource: management_router }
- subnet: { get_resource: subnet }
-
- security_group:
- type: OS::Neutron::SecurityGroup
- properties:
- name: qtip_security_group
- rules:
- - port_range_min: 22
- port_range_max: 5201
- protocol: tcp
- - port_range_min: 22
- port_range_max: 5201
- protocol: udp
- - protocol: icmp
-
-outputs:
- description: 'none'
diff --git a/legacy/docker/README.md b/legacy/docker/README.md
deleted file mode 100644
index 35ac0935..00000000
--- a/legacy/docker/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# QTIP The Indices for Performance
-
-[QTIP] is an [OPNFV] project.
-
-It aims to build a platform for creating and sharing indices of [NFVI] performance.
-
-See the [project vision](https://wiki.opnfv.org/display/qtip/Vision) for more details.
-
-[QTIP]: https://wiki.opnfv.org/display/qtip
-[OPNFV]: https://www.opnfv.org
-[NFVI]: https://en.wikipedia.org/wiki/Network_function_virtualization
diff --git a/legacy/docker/cleanup_qtip_image.sh b/legacy/docker/cleanup_qtip_image.sh
deleted file mode 100644
index 9c2b59db..00000000
--- a/legacy/docker/cleanup_qtip_image.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-if [[ ! -f ${QTIP_DIR}/openrc ]];then
- source ${REPOS_DIR}/releng/utils/fetch_os_creds.sh \
- -d ${QTIP_DIR}/openrc \
- -i ${INSTALLER_TYPE} \
- -a ${INSTALLER_IP}
-fi
-
-source ${QTIP_DIR}/openrc
-
-cleanup_image()
-{
- echo
- if ! glance image-list; then
- return
- fi
-
- echo "Deleting image QTIP_CentOS..."
- glance image-delete $(glance image-list | grep -e QTIP_CentOS | awk '{print $2}')
-
-}
-
-cleanup_image
diff --git a/legacy/docker/prepare_qtip_image.sh b/legacy/docker/prepare_qtip_image.sh
deleted file mode 100644
index 4095c806..00000000
--- a/legacy/docker/prepare_qtip_image.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-IMGNAME='QTIP_CentOS.qcow2'
-IMGPATH='/home/opnfv/imgstore'
-IMGURL='http://build.opnfv.org/artifacts.opnfv.org/qtip/QTIP_CentOS.qcow2'
-
-load_image()
-{
- if [[ -n $( glance image-list | grep -e QTIP_CentOS) ]]; then
- return
- fi
-
- test -d $IMGPATH || mkdir -p $IMGPATH
- if [[ ! -f "$IMGPATH/$IMGNAME" ]];then
- echo
- echo "========== Downloading QTIP_CentOS image =========="
- cd $IMGPATH
- wget -c --progress=dot:giga $IMGURL
- fi
-
- echo
- echo "========== Loading QTIP_CentOS image =========="
- output=$(glance image-create \
- --name QTIP_CentOS \
- --visibility public \
- --disk-format qcow2 \
- --container-format bare \
- --file $IMGPATH/$IMGNAME )
- echo "$output"
-
- IMAGE_ID=$(echo "$output" | grep " id " | awk '{print $(NF-1)}')
-
- if [ -z "$IMAGE_ID" ]; then
- echo 'Failed uploading QTIP_CentOS image to cloud'.
- exit 1
- fi
-
- echo "QTIP_CentOS image id: $IMAGE_ID"
-}
-
-rm -rf ${QTIP_DIR}/openrc
-
-${REPOS_DIR}/releng/utils/fetch_os_creds.sh \
--d ${QTIP_DIR}/openrc \
--i ${INSTALLER_TYPE} \
--a ${INSTALLER_IP}
-
-source ${QTIP_DIR}/openrc
-
-load_image
diff --git a/legacy/docker/push_db.sh b/legacy/docker/push_db.sh
deleted file mode 100755
index 50341eac..00000000
--- a/legacy/docker/push_db.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-cd ${QTIP_DIR} && python qtip/utils/dashboard/pushtoDB.py
diff --git a/legacy/docker/run_qtip.sh b/legacy/docker/run_qtip.sh
deleted file mode 100755
index 98abf139..00000000
--- a/legacy/docker/run_qtip.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#! /bin/bash
-
-QTIP=qtip/run.py
-
-run_test_suite()
-{
- if [ "$TEST_CASE" == "compute" ]; then
- cd ${QTIP_DIR} && python ${QTIP} -l default -f compute
- cd ${QTIP_DIR} && python scripts/ref_results/suite_result.py compute
- elif [ "$TEST_CASE" == "storage" ]; then
- cd ${QTIP_DIR} && python ${QTIP} -l default -f storage
- cd ${QTIP_DIR} && python scripts/ref_results/suite_result.py storage
- elif [ "$TEST_CASE" == "network" ]; then
- cd ${QTIP_DIR} && python ${QTIP} -l default -f network
- cd ${QTIP_DIR} && python scripts/ref_results/suite_result.py network
- elif [ "$TEST_CASE" == "all" ]; then
- cd ${QTIP_DIR} && python ${QTIP} -l default -f compute
- cd ${QTIP_DIR} && python ${QTIP} -l default -f storage
- cd ${QTIP_DIR} && python ${QTIP} -l default -f network
-
- cd ${QTIP_DIR} && python scripts/ref_results/suite_result.py compute
- cd ${QTIP_DIR} && python scripts/ref_results/suite_result.py storage
- cd ${QTIP_DIR} && python scripts/ref_results/suite_result.py network
- fi
-}
-
-rm -f ${QTIP_DIR}/config/QtipKey*
-
-echo "Generating ssh keypair"
-ssh-keygen -t rsa -N "" -f ${QTIP_DIR}/config/QtipKey -q
-
-source ${QTIP_DIR}/docker/prepare_qtip_image.sh
-
-run_test_suite
-
-source ${QTIP_DIR}/docker/cleanup_qtip_image.sh
-
-echo "Remove ssh keypair"
-rm -f ${QTIP_DIR}/config/QtipKey*
diff --git a/legacy/docs/_01-compute.rst b/legacy/docs/_01-compute.rst
deleted file mode 100644
index 56be5488..00000000
--- a/legacy/docs/_01-compute.rst
+++ /dev/null
@@ -1,104 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-
-Compute Suite
-=============
-
-Introduction
-------------
-
-The QTIP testing suite aims to benchmark the compute components of an OPNFV platform.
-Such components include CPU performance and memory performance.
-Additionally, the virtual compute performance provided by the hypervisor (KVM) installed as part of an OPNFV platform is benchmarked.
-
-The test suite consists of both synthetic and application specific benchmarks to test compute components.
-
-All the compute benchmarks could be run in 2 scenarios:
-
-1. On Baremetal Machines provisioned by an OPNFV installer (Host machines)
-2. On Virtual Machines brought up through OpenStack on an OPNFV platform
-
-Note: The compute benchmark suite contains relatively old benchmarks such as Dhrystone and Whetstone. The suite would be updated with better benchmarks, such as Linbench, for the OPNFV C release.
-
-Benchmarks
-----------
-
-The benchmarks include:
-
-Dhrystone 2.1
-^^^^^^^^^^^^^^^^
-
-Dhrystone is a synthetic benchmark for measuring CPU performance. It uses integer calculations to evaluate CPU capabilities.
-Both single-CPU and multi-CPU performance are measured.
-
-
-Dhrystone, however, is a dated benchmark and has some shortcomings.
-Written in C, it is a small program that doesn't test the CPU memory subsystem.
-Additionally, Dhrystone results can be skewed by compiler optimizations and, in some cases, by the hardware configuration.
-
-References: http://www.eembc.org/techlit/datasheets/dhrystone_wp.pdf
-
-Whetstone
-^^^^^^^^^^^^
-
-Whetstone is a synthetic benchmark to measure CPU floating-point operation performance.
-Both single-CPU and multi-CPU performance are measured.
-
-Like Dhrystone, Whetstone is a dated benchmark and has shortcomings.
-
-References:
-
-http://www.netlib.org/benchmark/whetstone.c
-
-OpenSSL Speed
-^^^^^^^^^^^^^^^^
-
-OpenSSL Speed can be used to benchmark the compute performance of a machine. In QTIP, two OpenSSL Speed benchmarks are incorporated:
-1. RSA signatures per second signed by a machine
-2. AES 128-bit encryption throughput for a machine across cipher block sizes
-
-References:
-
-https://www.openssl.org/docs/manmaster/apps/speed.html
-
-RAMSpeed
-^^^^^^^^
-
-RAMSpeed is used to measure a machine's memory performance.
-The problem (array) size is large enough to ensure cache misses so that the main machine memory is used.
-INTmem and FLOATmem benchmarks are executed in 4 different scenarios:
-
-a. Copy: a(i)=b(i)
-b. Add: a(i)=b(i)+c(i)
-c. Scale: a(i)=b(i)*d
-d. Triad: a(i)=b(i)+c(i)*d
-
-INTmem uses integers in these four benchmarks whereas FLOATmem uses floating points for these benchmarks.
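-
-A minimal illustrative sketch of the four access patterns, written here in plain
-Python for clarity (RAMSpeed itself implements them in optimized native code):
-::
-
-    # Illustrative only: the four RAMSpeed access patterns over arrays a, b and c
-    a, b, c, d = [0.0] * 1024, [1.0] * 1024, [2.0] * 1024, 3.0
-    for i in range(len(a)):
-        a[i] = b[i]              # Copy
-    for i in range(len(a)):
-        a[i] = b[i] + c[i]       # Add
-    for i in range(len(a)):
-        a[i] = b[i] * d          # Scale
-    for i in range(len(a)):
-        a[i] = b[i] + c[i] * d   # Triad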
-
-References:
-
-http://alasir.com/software/ramspeed/
-
-https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/W51a7ffcf4dfd_4b40_9d82_446ebc23c550/page/Untangling+memory+access+measurements
-
-DPI
-^^^
-
-nDPI is a modified variant of OpenDPI, an open-source deep packet inspection library maintained by ntop.
-An example application called *pcapreader* has been developed and is available for use alongside nDPI.
-
-A sample .pcap file is passed to the *pcapreader* application.
-nDPI classifies traffic in the pcap file into different categories based on string matching.
-The *pcapreader* application provides a throughput number for the rate at which traffic was classified, indicating a machine's computational performance.
-The benchmark is run 10 times and an average is taken of the obtained throughput numbers.
-
-*nDPI may provide inconsistent results and was added to Brahmaputra for experimental purposes*
-
-References:
-
-http://www.ntop.org/products/deep-packet-inspection/ndpi/
-
-http://www.ntop.org/wp-content/uploads/2013/12/nDPI_QuickStartGuide.pdf
diff --git a/legacy/docs/_02-network.rst b/legacy/docs/_02-network.rst
deleted file mode 100644
index 00fe5b0a..00000000
--- a/legacy/docs/_02-network.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-
-Network Suite
-=============
-
-QTIP uses IPerf3 as the main tool for testing the network throughput.
-There are three tests that are run through the QTIP framework.
-
-**1. Network throughput between two compute nodes**
-
-**2. Network Throughput between two VMs on the same compute node**
-
-**3. Network Throughput between two VMs on different compute nodes**
-
-
-Network throughput between two compute nodes
------------------------------------------------
-
-For the throughput between two compute nodes, Iperf3 is installed on the compute nodes comprising the systems-under-test.
-One of the compute nodes is used as a server and the other as a client.
-The client pushes traffic to the server for a duration specified by the user in the configuration file for Iperf3.
-
-
-These files can be found in the "benchmarks/testplan/{POD}/network/" directory.
-The bandwidth is limited by the physical link layer speed connecting the two compute nodes.
-The result file includes the b/s bandwidth and the CPU usage for both the client and server.
-
-Network throughput between two VMs on the same compute node
---------------------------------------------------------------
-
-QTIP framework sets up a stack with a private network, security groups, routers and attaches two VMs to this network.
-Iperf3 is installed on the VMs and one is assigned the role of client while the other VM serves as a server.
-Traffic is pushed over the QTIP private network between the two VMs.
-A closer look is needed to see how the traffic actually flows between the VMs in this configuration to understand what is happening to the packet as it traverses the OpenStack virtual network.
-
-The packet originates from VM1 and is sent to the Linux bridge via a tap interface, where the security group rules are applied.
-Afterwards the packet is forwarded to the Integration bridge (br-int) via a patch port.
-Since VM2 is also connected to the Integration bridge in a similar manner as VM1, the packet gets forwarded to the Linux bridge connecting VM2.
-After the Linux bridge the packet is sent to VM2 and is received by the Iperf3 server.
-Since no physical link is involved in this topology, only the OVS Integration bridge (br-int) is being benchmarked.
-
-
-Network throughput between two VMs on different compute nodes
---------------------------------------------------------------
-
-
-As in case 2, QTIP framework sets up a stack with a private network, security groups, routers, and two VMs which are attached to the created network. However, the two VMs are spawned up on different compute nodes.
-
-Since the VMs are spawned on different nodes, the traffic involves additional paths.
-
-The traffic packet leaves the client VM and makes its way to the Integration Bridge (br-int) as in the previous case through a linux bridge and a patch port.
-The integration bridge (br-int) forwards the packet to the tunneling bridge (br-tun) where the packet is encapsulated based on the tunneling protocol used (GRE/VxLAN).
-The packet then moves onto the physical link through the ethernet bridge (br-eth).
-
-On the receiving compute node, the packet arrives at the ethernet bridge (br-eth) through the physical link.
-This packet then moves to the tunneling bridge (br-tun) where the packet is decapsulated.
-The packet then moves onto the integration bridge (br-int) and finally moves through a patch port into the Linux bridge and eventually to the VM, where it is received by the Iperf3 server application.
diff --git a/legacy/docs/_03-storage.rst b/legacy/docs/_03-storage.rst
deleted file mode 100644
index b1490432..00000000
--- a/legacy/docs/_03-storage.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-
-Storage Suite
-=============
-
-The QTIP benchmark suite aims to evaluate storage components within an OPNFV platform.
-For the Brahmaputra release, FIO would evaluate file system performance for the host machine.
-It will also test the I/O performance provided by the hypervisor (KVM) when storage benchmarks are run inside VMs.
-
-QTIP storage test cases consist of:
-
-**1. FIO Job to benchmark baremetal file system performance**
-
-**2. FIO Job to benchmark virtual machine file system performance**
-
-**Note: For Brahmaputra release, only the Ephemeral Storage is being tested. For C release persistent block and object storage would be tested.**
-
-The FIO Job would consist of:
-
-1. A file size of 5GB
-2. Random Read 50%, Random Write 50%
-3. Direct I/O
-4. Async I/O Engine
-5. I/O Queue depth of 2
-6. Block size: 4K
-
-For this job, I/O operations per second would be measured along with mean I/O latency to provide storage performance numbers.
diff --git a/legacy/docs/_testcase_description.rst b/legacy/docs/_testcase_description.rst
deleted file mode 100644
index d60ca949..00000000
--- a/legacy/docs/_testcase_description.rst
+++ /dev/null
@@ -1,46 +0,0 @@
-.. Template to be used for test case descriptions in QTIP Project.
- Write one .rst per test case.
- Borrowed Heavily from Yardstick
- Upload the .rst for the test case in /docs/ directory.
- Review in Gerrit.
-
-Test Case Description
-=====================
-
-+-----------------------------------------------------------------------------+
-|test case slogan e.g. Network throughput |
-+==============+==============================================================+
-|test case id | e.g. qtip_throughput |
-+--------------+--------------------------------------------------------------+
-|metric | what will be measured, e.g. latency |
-+--------------+--------------------------------------------------------------+
-|test purpose | describe what is the purpose of the test case |
-+--------------+--------------------------------------------------------------+
-|configuration | what .yaml file to use, state SLA if applicable, state |
-| | test duration, list and describe the scenario options used in|
-| | this TC and also list the options using default values. |
-+--------------+--------------------------------------------------------------+
-|test tool | e.g. ping |
-+--------------+--------------------------------------------------------------+
-|references | e.g. RFCxxx, ETSI-NFVyyy |
-+--------------+--------------------------------------------------------------+
-|applicability | describe variations of the test case which can be |
-| | performend, e.g. run the test for different packet sizes |
-+--------------+--------------------------------------------------------------+
-|pre-test | describe configuration in the tool(s) used to perform |
-|conditions | the measurements (e.g. fio, pktgen), POD-specific |
-| | configuration required to enable running the test |
-+--------------+------+----------------------------------+--------------------+
-|test sequence | step | description | result |
-| +------+----------------------------------+--------------------+
-| | 1 | use this to describe tests that | what happens in |
-| | | require several steps e.g. | this step |
-| | | step 1 collect logs | e.g. logs collected|
-| +------+----------------------------------+--------------------+
-| | 2 | remove interface | interface down |
-| +------+----------------------------------+--------------------+
-| | N | what is done in step N | what happens |
-+--------------+------+----------------------------------+--------------------+
-|test verdict | expected behavior, or SLA, pass/fail criteria |
-+--------------+--------------------------------------------------------------+
-
diff --git a/legacy/docs/annex.rst b/legacy/docs/annex.rst
deleted file mode 100644
index e8bf5555..00000000
--- a/legacy/docs/annex.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2016 ZTE Corp.
-
-
-*****
-Annex
-*****
-
-.. toctree::
- :maxdepth: 2
-
- _testcase_description.rst
-
-Downloads
-=========
-
-- :download:`Sample configuration <../download/sample_config.yaml>`
diff --git a/legacy/docs/apidocs/qtip_restful_api.rst b/legacy/docs/apidocs/qtip_restful_api.rst
deleted file mode 100644
index 7e48b95b..00000000
--- a/legacy/docs/apidocs/qtip_restful_api.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-****************
-QTIP restful api
-****************
-
-All the QTIP RESTful APIs can be found at http://restful_api.qtip.openzero.net/api/spec.html.
diff --git a/legacy/docs/benchmark-suites.rst b/legacy/docs/benchmark-suites.rst
deleted file mode 100644
index 84d1c647..00000000
--- a/legacy/docs/benchmark-suites.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2016 ZTE Corp.
-
-
-****************
-Benchmark Suites
-****************
-
-.. toctree::
- :maxdepth: 2
-
- _01-compute.rst
- _02-network.rst
- _03-storage.rst
diff --git a/legacy/docs/download/sample_config.yaml b/legacy/docs/download/sample_config.yaml
deleted file mode 100644
index 72c16bf4..00000000
--- a/legacy/docs/download/sample_config.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
- ## This is a sample Config.yaml file
-
-#Scenario would define the test case scenario.
-#The benchmark key contains the benchmark to run, such as dhrystone, whetstone, dpi, ssh, etc.
-# Host and server list the different machines on which the benchmark would run.
-# On machines listed within hosts, the benchmarks would run in parallel.
-# On machines listed within server, the benchmarks would run once the benchmarks running on hosts have completed.
-# This is used to control the flow of the test case. For example, when running the test cases on a VM vs a host machine, we would like to run the test case serially: it should run first on the host machine and then on the VM. This test case flow control could be used for other test cases to be developed, such as those for networking.
-Scenario:
- benchmark: dhrystone
- host: machine_1, machine_2, virtualmachine_1
- server:
-
-
-#Context would define the environment on which to run:
-#Host Machine keys would contain Host_Machines/ Baremetal machines to run the benchmarks on
-#e.g. in Host_Machines, machine_1 and machine_2 are the baremetal machines. For each baremetal machine, give its IP (which should be reachable from the location on which you run QTIP), its password and its role (host or server). If your installer is 'fuel' or 'compass' and you leave the baremetal machine IP empty, QTIP will get the compute node IP from the installer automatically.
-Context:
- Host_Machines:
- machine_1:
- ip: 172.18.0.16
- pw: Op3nStack
- role: host
- Virtual_Machines:
- virtualmachine_1:
- availability_zone: nova
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: server
-#The Proxy_Environment key is optional. If all the machines can access the public network, there is no need to define Proxy_Environment. It is used to provide the http/https proxy settings which will be set on all the Host_Machines and Virtual_Machines.
- Proxy_Environment:
- http_proxy: http://10.20.0.1:8118
- https_proxy: http://10.20.0.1:8118
- no_proxy: localhost,127.0.0.1,10.20.*,192.168.*
-
-# A general description of the testcase. Could be used later for reports.
-Test_Description:
- Test_category: "Compute"
- Benchmark: "dhrystone"
- Overview: >
- ''' This test will run the dhrystone benchmark in serial on machine_1 and machine_2.\n
- if you wish to add a virtual machine add the following information under the Virtual_Machine tag
-
- virtualmachine_1:
- availability_zone:
- public_network:
- OS_image:
- flavor:
- role: '''
diff --git a/legacy/docs/index.rst b/legacy/docs/index.rst
deleted file mode 100644
index 241a2680..00000000
--- a/legacy/docs/index.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-#################
-QTIP Config Guide
-#################
-
-.. toctree::
- :maxdepth: 2
-
- ./qtip_restful_api.rst
diff --git a/legacy/docs/introduction.rst b/legacy/docs/introduction.rst
deleted file mode 100644
index 3147f0aa..00000000
--- a/legacy/docs/introduction.rst
+++ /dev/null
@@ -1,381 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-
-************
-Introduction
-************
-
-This guide will serve as a first step to familiarize the user with how to
-run QTIP for the first time, after the user has pulled the QTIP image onto their host machine.
-In order to install and configure QTIP please follow the instructions in
-configuration.rst, located in docs/configguide/configuration.rst.
-
-QTIP Directory structure
-========================
-
-The QTIP directory has been sectioned off into multiple folders to facilitate
-segmenting information into relevant categories. The folders that concern
-the end user are `benchmarks/testplan/` and `benchmarks/suite/`.
-
-**testplan/:**
-
-This folder is used to store all the config files which are used to set up the
-environment prior to a test. This folder is further divided into OPNFV pods
-which run QTIP. Inside each pod there are folders which contain the config files
-segmented based on test cases. Namely, these include `Compute`, `Network` and
-`Storage`. The default folder is there for the end user who is interested in
-testing their infrastructure, which is installed by fuel or compass but isn't
-part of an OPNFV pod, and for OPNFV CI.
-
-The structure of the directory for the user appears as follows
-::
-
- testplan/default/compute
- testplan/default/network
- testplan/default/storage
-
-The benchmarks that are part of the QTIP framework are listed under these
-folders. An example of the compute folder is shown below.
-Their naming convention is <BENCHMARK>_<VM/BM>.yaml
-::
-
- dhrystone_bm.yaml
- dhrystone_vm.yaml
- whetstone_vm.yaml
- whetstone_bm.yaml
- ssl_vm.yaml
- ssl_bm.yaml
- ramspeed_vm.yaml
- ramspeed_bm.yaml
- dpi_vm.yaml
- dpi_bm.yaml
-
-The above listed files are used to configure the environment. The VM/BM tag
-distinguishes between a test to be run on the Virtual Machine or the compute
-node itself, respectively.
-
-
-**benchmarks/suite/:**
-
-This folder contains three files, namely `compute`, `network` and `storage`.
-These files list the benchmarks that are to be run by the QTIP framework. A sample
-compute test file is shown below
-::
-
- {
- "bm": [
- "dhrystone_bm.yaml",
- "whetstone_bm.yaml",
- "ramspeed_bm.yaml",
- "dpi_bm.yaml",
- "ssl_bm.yaml"
- ],
- "vm": [
- "dhrystone_vm.yaml",
- "whetstone_vm.yaml",
- "ramspeed_vm.yaml",
- "dpi_vm.yaml",
- "ssl_vm.yaml"
- ]
- }
-
-The compute file will now run all the benchmarks listed above one after
-another on the environment.
-
-Preparing a config file for test:
----------------------------------
-
-We will be using dhrystone as an example to list out the changes that the
-user will need to make in order to run the benchmark.
-
-Dhrystone on Compute Nodes:
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-QTIP framework can run benchmarks on the actual compute nodes as well. In
-order to run dhrystone on the compute nodes we will be editing the
-dhrystone_bm.yaml file.
-
-::
-
- Scenario:
- benchmark: dhrystone
- host: machine_1, machine_2
- server:
-
-The `Scenario` field is used to specify the name of the benchmark to
-run, as done by `benchmark: dhrystone`. The `host` and `server` tags are
-not used for the compute benchmarks but are included here to help the
-user `IF` they wish to control the execution. By default both machine_1
-and machine_2 will have dhrystone run on them in parallel, but the user
-can change this so that machine_1 runs dhrystone before machine_2. This
-will be elaborated in the `Context` tag.
-
-::
-
- Context:
- Host_Machines:
- machine_1:
- ip: 10.20.0.6
- pw:
- role: host
- machine_2:
- ip: 10.20.0.5
- pw:
- role: host
-
- Virtual_Machines:
-
-The `Context` tag helps the user list the number of compute nodes they want
-to run dhrystone on. The user can list all the compute nodes under the
-`Host_Machines` tag. All the machines under test must be listed under
-`Host_Machines`, named incrementally. The `ip:` tag is used
-to specify the IP of the particular compute node. The `ip:` tag can be left
-blank when the installer type is 'fuel', because QTIP will get the IP
-from the installer. The `pw:` tag can be left blank because QTIP uses its own
-key for ssh. In order to run dhrystone on one compute node at a time the user
-needs to edit the `role:` tag. `role: host` for machine_1 and `role: server`
-for machine_2 will allow dhrystone to be run on machine_1 and then
-on machine_2.
-
-::
-
-
- Test_Description:
- Test_category: "Compute"
- Benchmark: "dhrystone"
- Overview: >
- ''' This test will run the dhrystone benchmark in parallel on
- machine_1 and machine_2.
-
-The above field is purely for description purposes, to explain to the user
-the working of the test, and is not fed to the framework.
-
-Sample dhrystone_bm.yaml file:
-------------------------------
-::
-
- Scenario:
- benchmark: dhrystone
- host: machine_1, machine_2
- server:
-
- Context:
- Host_Machines:
- machine_1:
- ip: 10.20.0.6
- pw:
- role: host
- machine_2:
- ip: 10.20.0.5
- pw:
- role: host
-
- Virtual_Machines:
-
-
- Test_Description:
- Test_category: "Compute"
- Benchmark: "dhrystone"
- Overview: >
- ''' This test will run the dhrystone benchmark in parallel on
- machine_1 and machine_2.\n
-
-Dhrystone on Virtual Machine:
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-To run dhrystone on the VMs we will be editing dhrystone_vm.yaml file.
-Snippets on the file are given below.
-
-::
-
- Scenario:
- benchmark: dhrystone
- host: virtualmachine_1, virtualmachine_2
- server:
-
-
-The `Scenario` field is used to specify the name of the benchmark to
-run, as done by `benchmark: dhrystone`. The `host` and `server` tags are
-not used for the compute benchmarks but are included here to help the
-user `IF` they wish to control the execution. By default both
-virtualmachine_1 and virtualmachine_2 will have dhrystone run on them
-in parallel, but the user can change this so that virtualmachine_1 runs
-dhrystone before virtualmachine_2. This will be elaborated in the
-`Context` tag.
-::
-
- Context:
- Host_Machines:
-
- Virtual_Machines:
- virtualmachine_1:
- availability_zone: compute1
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: host
- virtualmachine_2:
- availability_zone: compute2
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: host
-
-The `Context` tag helps the user list the number of VMs and their
-characteristics. The user can list all the VMs they want to bring up
-under the `Virtual_Machines:` tag. In the above example we will be
-bringing up two VMs, one on Compute1 and the other on Compute2. The
-user can change this as desired. `NOTE: Please ensure you have the
-necessary compute nodes before listing them under the 'availability_zone:'
-tag`. The rest of the options do not need to be modified by the user.
-
-Running dhrystone sequentially (Optional):
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In order to run dhrystone on one VM at a time, the user needs to edit
-the `role:` tag. `role: host` for virtualmachine_1 and `role: server`
-for virtualmachine_2 will allow dhrystone to be run on
-virtualmachine_1 and then on virtualmachine_2.
-
-::
-
- Test_Description:
- Test_category: "Compute"
- Benchmark: "dhrystone"
- Overview:
- This test will run the dhrystone benchmark in parallel on
- virtualmachine_1 and virtualmachine_2
-
-The above field is purely for description purposes, to explain to
-the user the working of the test, and is not fed to the framework.
-
-Running dhrystone with proxy (Optional):
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-In order to run dhrystone on hosts or VMs which can only access the
-public network through a proxy, the user needs to add the `Proxy_Environment` info
-in the `Context` tag.
-
-::
-
- Context:
- Host_Machines:
- machine_1:
- ip: 10.20.0.29
- pw:
- role: host
- machine_2:
- ip: 10.20.0.30
- pw:
- role: host
-
- Virtual_Machines:
-
- Proxy_Environment:
- http_proxy: http://10.20.0.1:8118
- https_proxy: http://10.20.0.1:8118
- no_proxy: localhost,127.0.0.1,10.20.*,192.168.*
-
-Sample dhrystone_vm.yaml file:
-------------------------------
-::
-
- Scenario:
- benchmark: dhrystone
- host: virtualmachine_1, virtualmachine_2
- server:
-
- Context:
- Host_Machines:
-
- Virtual_Machines:
- virtualmachine_1:
- availability_zone: compute1
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: host
- virtualmachine_2:
- availability_zone: compute2
- public_network: 'net04_ext'
- OS_image: QTIP_CentOS
- flavor: m1.large
- role: host
-
- Test_Description:
- Test_category: "Compute"
- Benchmark: "dhrystone"
- Overview: >
- This test will run the dhrystone benchmark in parallel on
- machine_1 and machine_2.\n
-
-Commands to run the Framework:
-------------------------------
-
-In order to start QTIP on the default lab please use the following commands (assuming your installer
-is 'fuel' or 'compass', you use the config files in the benchmarks/testplan/default/ directory and have listed the
-intended suite in the benchmarks/suite/<RELEVANT-SUITE-FILE>):
-
-The first step is to export the necessary information to the environment and generate the QTIP key pair.
-Please follow the instructions in configuration.rst.
-
-The second step is to download the QTIP image and upload it to the cloud. QTIP will use this image
-to create VMs when testing VM performance.
-::
-
- source docker/prepare_qtip_image.sh
-
-Running QTIP using `default` as the pod name and the `compute` suite via the CLI.
-::
-
- python qtip.py -l default -f compute
-
-Running QTIP using 'default' as the pod name and the 'compute' suite with 'bm' type via the RESTful API.
-::
-
- curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"compute", "type": "BM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs
-
-Running QTIP using 'default' as the pod name and the 'compute' suite with 'vm' type via the RESTful API.
-::
-
- curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"compute", "type": "VM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs
-
-Running QTIP using `default` as the pod name and the `network` suite via the CLI.
-::
-
- python qtip.py -l default -f network
-
-Running QTIP using 'default' as the pod name and the 'network' suite with 'bm' type via the RESTful API.
-::
-
- curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"network", "type": "BM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs
-
-Running QTIP using `default` as the pod name and the `storage` suite via the CLI.
-::
-
- python qtip.py -l default -f storage
-
-Running QTIP using 'default' as the pod name and the 'storage' suite with 'bm' type via the RESTful API.
-::
-
- curl --trace-ascii debug.txt -X POST -d '{ "installer_ip": "10.20.6.2","installer_type":"fuel", "suite_name":"storage", "type": "BM"}' -H "Content-Type: application/json" http://127.0.0.1:5000/api/v1.0/jobs
-
-Get the status of a running QTIP job via the RESTful API
-::
-
- curl --trace-ascii debug.txt -X GET http://127.0.0.1:5000/api/v1.0/jobs/job-id
- For example:
- curl --trace-ascii debug.txt -X GET http://127.0.0.1:5000/api/v1.0/jobs/5b71f035-3fd6-425c-9cc7-86acd3a04214
-
-Stop a running QTIP job via the RESTful API. The job will finish the current benchmark test and then stop.
-::
-
- curl --trace-ascii debug.txt -X DELETE http://127.0.0.1:5000/api/v1.0/jobs/job-id
- For example:
- curl --trace-ascii debug.txt -X DELETE http://127.0.0.1:5000/api/v1.0/jobs/5b71f035-3fd6-425c-9cc7-86acd3a04214
-
-Results:
---------
-In the QTIP container, QTIP generates results in the `/home/opnfv/qtip/results/` directory, grouped under the particular benchmark name. So all the results for dhrystone would be listed there and timestamped.
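-
-A minimal sketch of reading the aggregated suite result written by
-`scripts/ref_results/suite_result.py` (illustrative only; the path assumes the
-default results location inside the container):
-::
-
-    import json
-
-    # Load the aggregated compute suite result produced by suite_result.py
-    with open('/home/opnfv/qtip/results/compute_result.json') as f:
-        result = json.load(f)
-
-    print(result['index'])          # overall suite index
-    print(result['suite_results'])  # per-benchmark indices and raw results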
diff --git a/legacy/docs/overview/index.rst b/legacy/docs/overview/index.rst
deleted file mode 100644
index 9a387360..00000000
--- a/legacy/docs/overview/index.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2016 ZTE Corp.
-
-
-
-#####################
-QTIP Project Overview
-#####################
-
-.. toctree::
- :maxdepth: 2
-
- ./overview.rst
diff --git a/legacy/docs/overview/overview.rst b/legacy/docs/overview/overview.rst
deleted file mode 100644
index 4fd42356..00000000
--- a/legacy/docs/overview/overview.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2015 Dell Inc.
-.. (c) 2016 ZTE Corp.
-
-********
-Overview
-********
-
-.. _QTIP: https://wiki.opnfv.org/platform_performance_benchmarking
-
-QTIP_ is an OPNFV Project.
-
-QTIP aims to benchmark OPNFV platforms through a "Bottom up" approach, testing
-bare-metal components first.
-
-The overall problem this project tries to solve is the general
-characterization of an OPNFV platform. It will focus on general performance
-questions that are common to the platform itself, or applicable to multiple
-OPNFV use cases. QTIP will provide the capability to quantify a platform's
-performance behavior in a standardized, rigorous, and open way.
diff --git a/legacy/driver/playbook/bwn_ng.yaml b/legacy/driver/playbook/bwn_ng.yaml
deleted file mode 100644
index c52cb14e..00000000
--- a/legacy/driver/playbook/bwn_ng.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: Install bwm-ng when CentOS
- yum:
- name: bwm-ng
- state: present
- when: ansible_os_family == "RedHat"
-
-- name: Install bwm-ng when Ubuntu
- apt:
- name: bwm-ng
- state: present
- update_cache: yes
- when: ansible_os_family == "Debian"
-
-- name: Run bwm-ng
- shell: bwm-ng -o plain -c 1 > bwm-dump.log
- args:
- chdir: '{{ dest_path }}' \ No newline at end of file
diff --git a/legacy/driver/playbook/top.yaml b/legacy/driver/playbook/top.yaml
deleted file mode 100644
index dfa0aff2..00000000
--- a/legacy/driver/playbook/top.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-- name: Collect cpu usage
- shell: top -bn1 > top.log
- args:
- chdir: '{{ dest_path }}'
diff --git a/legacy/scripts/__init__.py b/legacy/scripts/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/scripts/__init__.py
+++ /dev/null
diff --git a/legacy/scripts/ref_results/__init__.py b/legacy/scripts/ref_results/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/scripts/ref_results/__init__.py
+++ /dev/null
diff --git a/legacy/scripts/ref_results/compute_benchmarks_indices.py b/legacy/scripts/ref_results/compute_benchmarks_indices.py
deleted file mode 100644
index 936b58df..00000000
--- a/legacy/scripts/ref_results/compute_benchmarks_indices.py
+++ /dev/null
@@ -1,168 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from index_calculation import generic_index as get_index
-from index_calculation import get_reference
-from result_accum import result_concat as concat
-
-
-def dpi_index():
- dpi_dict = concat('results/dpi/')
- dpi_bm_ref = get_reference('compute', 'dpi_bm')
- dpi_bm_index = get_index(dpi_dict, 'dpi_bm', dpi_bm_ref, 'details', 'bps')
-
- dpi_vm_ref = get_reference('compute', 'dpi_vm')
- dpi_vm_index = get_index(dpi_dict, 'dpi_vm', dpi_vm_ref, 'details', 'bps')
- dpi_index = (dpi_bm_index + dpi_vm_index) / 2
- dpi_dict_i = {}
- dpi_dict_i['index'] = dpi_index
- dpi_dict_i['results'] = dpi_dict
- return dpi_dict_i
-
-
-def dhrystone_index():
-
- dhrystone_dict = concat('results/dhrystone/')
- dhrystone_single_bm_ref = get_reference('compute', 'dhrystone_bm', 'single_cpu')
- dhrystone_single_bm_index = get_index(dhrystone_dict, 'dhrystone_bm', dhrystone_single_bm_ref, 'details', 'single', 'score')
-
- dhrystone_multi_bm_ref = get_reference('compute', 'dhrystone_bm', 'multi_cpu')
- dhrystone_multi_bm_index = get_index(dhrystone_dict, 'dhrystone_bm', dhrystone_multi_bm_ref, 'details', 'multi', 'score')
-
- dhrystone_bm_index = (dhrystone_single_bm_index + dhrystone_multi_bm_index) / 2
-
- dhrystone_single_vm_ref = get_reference('compute', 'dhrystone_vm', 'single_cpu')
- dhrystone_single_vm_index = get_index(dhrystone_dict, 'dhrystone_vm', dhrystone_single_vm_ref, 'details', 'single', 'score')
-
- dhrystone_multi_vm_ref = get_reference('compute', 'dhrystone_vm', 'multi_cpu')
- dhrystone_multi_vm_index = get_index(dhrystone_dict, 'dhrystone_vm', dhrystone_multi_vm_ref, 'details', 'multi', 'score')
-
- dhrystone_vm_index = (dhrystone_single_vm_index + dhrystone_multi_vm_index) / 2
-
- dhrystone_index = (dhrystone_bm_index + dhrystone_vm_index) / 2
- dhrystone_dict_i = {}
- dhrystone_dict_i['index'] = dhrystone_index
- dhrystone_dict_i['results'] = dhrystone_dict
- return dhrystone_dict_i
-
-
-def whetstone_index():
-
- whetstone_dict = concat('results/whetstone/')
- whetstone_single_bm_ref = get_reference('compute', 'whetstone_bm', 'single_cpu')
- whetstone_single_bm_index = get_index(whetstone_dict, 'whetstone_bm', whetstone_single_bm_ref, 'details', 'single', 'score')
-
- whetstone_multi_bm_ref = get_reference('compute', 'whetstone_bm', 'multi_cpu')
- whetstone_multi_bm_index = get_index(whetstone_dict, 'whetstone_bm', whetstone_multi_bm_ref, 'details', 'multi', 'score')
-
- whetstone_bm_index = (whetstone_single_bm_index + whetstone_multi_bm_index) / 2
-
- whetstone_single_vm_ref = get_reference('compute', 'whetstone_vm', 'single_cpu')
- whetstone_single_vm_index = get_index(whetstone_dict, 'whetstone_vm', whetstone_single_vm_ref, 'details', 'single', 'score')
-
- whetstone_multi_vm_ref = get_reference('compute', 'whetstone_vm', 'multi_cpu')
- whetstone_multi_vm_index = get_index(whetstone_dict, 'whetstone_vm', whetstone_multi_vm_ref, 'details', 'multi', 'score')
-
- whetstone_vm_index = (whetstone_single_vm_index + whetstone_multi_vm_index) / 2
-
- whetstone_index = (whetstone_bm_index + whetstone_vm_index) / 2
- whetstone_dict_i = {}
- whetstone_dict_i['index'] = whetstone_index
- whetstone_dict_i['results'] = whetstone_dict
- return whetstone_dict_i
-
-
-def ramspeed_index():
-
- ramspeed_dict = concat('results/ramspeed/')
- ramspeed_int_bm_ref = get_reference('compute', 'ramspeed_bm', 'INTmem', 'Average (MB/s)')
- ramspeed_int_bm_index = get_index(ramspeed_dict, 'ramspeed_bm', ramspeed_int_bm_ref, 'details', 'int_bandwidth', 'average')
-
- ramspeed_float_bm_ref = get_reference('compute', 'ramspeed_bm', 'FLOATmem', 'Average (MB/s)')
- ramspeed_float_bm_index = get_index(ramspeed_dict, 'ramspeed_bm', ramspeed_float_bm_ref, 'details', 'float_bandwidth', 'average')
-
- ramspeed_bm_index = (ramspeed_int_bm_index + ramspeed_float_bm_index) / 2
-
- ramspeed_int_vm_ref = get_reference('compute', 'ramspeed_vm', 'INTmem', 'Average (MB/s)')
- ramspeed_int_vm_index = get_index(ramspeed_dict, 'ramspeed_vm', ramspeed_int_vm_ref, 'details', 'int_bandwidth', 'average')
-
- ramspeed_float_vm_ref = get_reference('compute', 'ramspeed_vm', 'FLOATmem', 'Average (MB/s)')
- ramspeed_float_vm_index = get_index(ramspeed_dict, 'ramspeed_vm', ramspeed_float_vm_ref, 'details', 'float_bandwidth', 'average')
-
- ramspeed_vm_index = (ramspeed_int_vm_index + ramspeed_float_vm_index) / 2
-
- ramspeed_index = (ramspeed_vm_index + ramspeed_bm_index) / 2
-
- ramspeed_dict_i = {}
- ramspeed_dict_i['index'] = ramspeed_index
- ramspeed_dict_i['results'] = ramspeed_dict
- return ramspeed_dict_i
-
-
-def ssl_index():
-
- ssl_dict = concat('results/ssl/')
-
- ssl_RSA512b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '512b')
- ssl_RSA1024b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '1024b')
- ssl_RSA2048b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '2048b')
- ssl_RSA4096b_bm_ref = get_reference('compute', 'ssl_bm', 'RSA', '4096b')
-
- ssl_AES16B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '16B')
- ssl_AES64B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '64B')
- ssl_AES256B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '256B')
- ssl_AES1024B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '1024B')
- ssl_AES8192B_bm_ref = get_reference('compute', 'ssl_bm', 'AES', '8192B')
-
- ssl_RSA512b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA512b_bm_ref, 'details', 'rsa_sig', '512_bits')
- ssl_RSA1024b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA1024b_bm_ref, 'details', 'rsa_sig', '1024_bits')
- ssl_RSA2048b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA2048b_bm_ref, 'details', 'rsa_sig', '2048_bits')
- ssl_RSA4096b_bm_index = get_index(ssl_dict, "ssl_bm", ssl_RSA4096b_bm_ref, 'details', 'rsa_sig', '4096_bits')
- ssl_RSA_bm_index = (ssl_RSA512b_bm_index + ssl_RSA1024b_bm_index + ssl_RSA2048b_bm_index + ssl_RSA4096b_bm_index) / 4
-
- ssl_AES16B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES16B_bm_ref, 'details', 'aes_128_cbc', '16B_block')
- ssl_AES64B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES64B_bm_ref, 'details', 'aes_128_cbc', '64B_block')
- ssl_AES256B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES256B_bm_ref, 'details', 'aes_128_cbc', '256B_block')
- ssl_AES1024B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES1024B_bm_ref, 'details', 'aes_128_cbc', '1024B_block')
- ssl_AES8192B_bm_index = get_index(ssl_dict, "ssl_bm", ssl_AES8192B_bm_ref, 'details', 'aes_128_cbc', '8192B_block')
- ssl_AES_bm_index = (ssl_AES16B_bm_index + ssl_AES64B_bm_index + ssl_AES256B_bm_index + ssl_AES1024B_bm_index + ssl_AES8192B_bm_index) / 5
-
- ssl_bm_index = (ssl_RSA_bm_index + ssl_AES_bm_index) / 2
-
- ssl_RSA512b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '512b')
- ssl_RSA1024b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '1024b')
- ssl_RSA2048b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '2048b')
- ssl_RSA4096b_vm_ref = get_reference('compute', 'ssl_vm', 'RSA', '4096b')
-
- ssl_AES16B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '16B')
- ssl_AES64B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '64B')
- ssl_AES256B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '256B')
- ssl_AES1024B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '1024B')
- ssl_AES8192B_vm_ref = get_reference('compute', 'ssl_vm', 'AES', '8192B')
-
- ssl_RSA512b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA512b_vm_ref, 'details', 'rsa_sig', '512_bits')
- ssl_RSA1024b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA1024b_vm_ref, 'details', 'rsa_sig', '1024_bits')
- ssl_RSA2048b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA2048b_vm_ref, 'details', 'rsa_sig', '2048_bits')
- ssl_RSA4096b_vm_index = get_index(ssl_dict, "ssl_vm", ssl_RSA4096b_vm_ref, 'details', 'rsa_sig', '4096_bits')
- ssl_RSA_vm_index = (ssl_RSA512b_vm_index + ssl_RSA1024b_vm_index + ssl_RSA2048b_vm_index + ssl_RSA4096b_vm_index) / 4
-
- ssl_AES16B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES16B_vm_ref, 'details', 'aes_128_cbc', '16B_block')
- ssl_AES64B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES64B_vm_ref, 'details', 'aes_128_cbc', '64B_block')
- ssl_AES256B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES256B_vm_ref, 'details', 'aes_128_cbc', '256B_block')
- ssl_AES1024B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES1024B_vm_ref, 'details', 'aes_128_cbc', '1024B_block')
- ssl_AES8192B_vm_index = get_index(ssl_dict, "ssl_vm", ssl_AES8192B_vm_ref, 'details', 'aes_128_cbc', '8192B_block')
- ssl_AES_vm_index = (ssl_AES16B_vm_index + ssl_AES64B_vm_index + ssl_AES256B_vm_index + ssl_AES1024B_vm_index + ssl_AES8192B_vm_index) / 5
-
- ssl_vm_index = (ssl_RSA_vm_index + ssl_AES_vm_index) / 2
-
- ssl_index = (ssl_bm_index + ssl_vm_index) / 2
-
- ssl_dict_i = {}
- ssl_dict_i['index'] = ssl_index
- ssl_dict_i['results'] = ssl_dict
- return ssl_dict_i
diff --git a/legacy/scripts/ref_results/index_calculation.py b/legacy/scripts/ref_results/index_calculation.py
deleted file mode 100644
index 14c2d4d2..00000000
--- a/legacy/scripts/ref_results/index_calculation.py
+++ /dev/null
@@ -1,49 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-
-
-def compute_index(total_measured, ref_result, count):
- try:
- average = float(total_measured / count)
-
- except ZeroDivisionError:
- average = 0
- index = average / ref_result
- return index
-
-
-def get_reference(*args):
-
- with open('scripts/ref_results/reference.json') as reference_file:
- reference_djson = json.load(reference_file)
- for arg in args:
- ref_n = reference_djson.get(str(arg))
- reference_djson = reference_djson.get(str(arg))
- return ref_n
-
-
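-# Example usage (as in compute_benchmarks_indices.py):
-#   generic_index(dpi_dict, 'dpi_bm', dpi_bm_ref, 'details', 'bps')
-# walks every result whose name is 'dpi_bm.yaml', follows the nested keys
-# 'details' -> 'bps', sums the measured values and passes the total to
-# compute_index, which averages it and divides by the reference number.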
-def generic_index(dict_gen, testcase, reference_num, *args):
- c = len(args)
- count = 0
- total = 0
- result = 0
- for k, v in dict_gen.iteritems():
- dict_temp = dict_gen[k]
- if dict_gen[k]['name'] == '{0}.yaml'.format(testcase):
- count = count + 1
- for arg in args:
- if arg == args[c - 1]:
- try:
- result = float(dict_temp.get(str(arg)))
- except ValueError:
- result = float(dict_temp.get(str(arg))[:-1]) * 1000
- dict_temp = dict_temp.get(str(arg))
- total = total + result
- return compute_index(total, reference_num, count)
diff --git a/legacy/scripts/ref_results/network_benchmarks_indices.py b/legacy/scripts/ref_results/network_benchmarks_indices.py
deleted file mode 100644
index 67980ee9..00000000
--- a/legacy/scripts/ref_results/network_benchmarks_indices.py
+++ /dev/null
@@ -1,28 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from index_calculation import generic_index as get_index
-from index_calculation import get_reference
-from result_accum import result_concat as concat
-
-
-def iperf_index():
- iperf_dict = concat('results/iperf/')
- iperf_bm_ref = get_reference('network', 'iperf_bm', 'throughput received(b/s)')
- iperf_bm_index = get_index(iperf_dict, 'iperf_bm', iperf_bm_ref, 'details', 'bandwidth', 'received_throughput')
- iperf_vm_ref = get_reference('network', 'iperf_vm', 'throughput received(b/s)')
- iperf_vm_index = get_index(iperf_dict, 'iperf_vm', iperf_vm_ref, 'details', 'bandwidth', 'received_throughput')
-
- iperf_vm_2_ref = get_reference('network', 'iperf_vm_2', 'throughput received(b/s)')
- iperf_vm_2_index = get_index(iperf_dict, 'iperf_vm_2', iperf_vm_2_ref, 'details', 'bandwidth', 'received_throughput')
- iperf_index = float(iperf_bm_index + iperf_vm_index + iperf_vm_2_index) / 3
- print iperf_index
- iperf_dict_i = {}
- iperf_dict_i['index'] = iperf_index
- iperf_dict_i['results'] = iperf_dict
- return iperf_dict_i
diff --git a/legacy/scripts/ref_results/reference.json b/legacy/scripts/ref_results/reference.json
deleted file mode 100644
index cfcbfc3b..00000000
--- a/legacy/scripts/ref_results/reference.json
+++ /dev/null
@@ -1,97 +0,0 @@
-{
- "compute": {
- "dhrystone_bm": {
- "multi_cpu": 103362.1,
- "single_cpu": 3231.7
- },
- "dhrystone_vm": {
- "multi_cpu": 10585.8,
- "single_cpu": 2953.6
- },
- "dpi_bm": 8.12,
- "dpi_vm": 22.12,
- "ramspeed_bm": {
- "FLOATmem": {
- "Average (MB/s)": 9758.79
- },
- "INTmem": {
- "Average (MB/s)": 12268.38
- }
- },
- "ramspeed_vm": {
- "FLOATmem": {
- "Average (MB/s)": 9064.09
- },
- "INTmem": {
- "Average (MB/s)": 12147.59
- }
- },
- "ssl_bm": {
- "AES": {
- "1024B": 808861020,
- "16B": 735490250,
- "256B": 803323650,
- "64B": 788429210,
- "8192B": 807701160
- },
- "RSA": {
- "1024b": 7931.44,
- "2048b": 1544.3,
- "4096b": 161.92,
- "512b": 22148.9
- }
- },
- "ssl_vm": {
- "AES": {
- "1024B": 808861020,
- "16B": 735490250,
- "256B": 803323650,
- "64B": 788429210,
- "8192B": 807701160
- },
- "RSA": {
- "1024b": 7931.44,
- "2048b": 1544.3,
- "4096b": 161.92,
- "512b": 22148.9
- }
- },
- "whetstone_bm": {
- "multi_cpu": 41483.3,
- "single_cpu": 806.1
- },
- "whetstone_vm": {
- "multi_cpu": 2950.6,
- "single_cpu": 789.0
- }
- },
- "network": {
- "iperf_bm": {
- "throughput received(b/s)": 944473000.0
- },
- "iperf_vm": {
- "throughput received(b/s)": 14416700000.0
- },
- "iperf_vm_2": {
- "throughput received(b/s)": 2461530000.0
- }
- },
- "storage": {
- "fio_bm": {
- "read": {
- "IOPS": 6693
- },
- "write": {
- "IOPS": 6688
- }
- },
- "fio_vm": {
- "read": {
- "IOPS": 2239
- },
- "write": {
- "IOPS": 2237
- }
- }
- }
-} \ No newline at end of file
diff --git a/legacy/scripts/ref_results/result_accum.py b/legacy/scripts/ref_results/result_accum.py
deleted file mode 100644
index 6eb169e8..00000000
--- a/legacy/scripts/ref_results/result_accum.py
+++ /dev/null
@@ -1,39 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import json
-
-
-def result_concat(targ_dir):
- list_vm = []
- list_bm = []
- diction = {}
-
- for file in os.listdir(targ_dir):
- if file.endswith(".json"):
- if file.startswith("instance"):
- print str(file)
- list_vm.append(file)
- else:
- list_bm.append(file)
- l = len(list_bm)
- k = len(list_vm)
-
- for x in range(0, l):
- file_t = list_bm[x]
- with open(targ_dir + file_t) as result_file:
- result_djson = json.load(result_file)
- diction['Baremetal' + str(int(x + 1))] = result_djson
-
- for x in range(0, k):
- file_t = list_vm[x]
- with open(targ_dir + file_t) as result_file:
- result_djson = json.load(result_file)
- diction['Virtual Machine ' + str(x + 1)] = result_djson
- return diction
diff --git a/legacy/scripts/ref_results/storage_benchmarks_indices.py b/legacy/scripts/ref_results/storage_benchmarks_indices.py
deleted file mode 100644
index e87fe36b..00000000
--- a/legacy/scripts/ref_results/storage_benchmarks_indices.py
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from index_calculation import generic_index as get_index
-from index_calculation import get_reference
-from result_accum import result_concat as concat
-
-
-def fio_index():
- fio_dict = concat('results/fio/')
- fio_r_bm_ref = get_reference('storage', 'fio_bm', 'read', 'IOPS')
- fio_r_bm_index = get_index(fio_dict, 'fio_bm', fio_r_bm_ref, 'details', 'job_0', 'read', 'io_ps')
- fio_w_bm_ref = get_reference('storage', 'fio_bm', 'write', 'IOPS')
- fio_w_bm_index = get_index(fio_dict, 'fio_bm', fio_w_bm_ref, 'details', 'job_0', 'write', 'io_ps')
-
- fio_bm_index = (fio_r_bm_index + fio_w_bm_index) / 2
-
- fio_r_vm_ref = get_reference('storage', 'fio_vm', 'read', 'IOPS')
- fio_r_vm_index = get_index(fio_dict, 'fio_vm', fio_r_vm_ref, 'details', 'job_0', 'read', 'io_ps')
-
- fio_w_vm_ref = get_reference('storage', 'fio_vm', 'write', 'IOPS')
- fio_w_vm_index = get_index(fio_dict, 'fio_vm', fio_w_vm_ref, 'details', 'job_0', 'write', 'io_ps')
-
- fio_vm_index = (fio_r_vm_index + fio_w_vm_index) / 2
-
- fio_index = (fio_bm_index + fio_vm_index) / 2
- print fio_index
-
- fio_dict_i = {}
- fio_dict_i['index'] = fio_index
- fio_dict_i['results'] = fio_dict
- return fio_dict_i
diff --git a/legacy/scripts/ref_results/suite_result.py b/legacy/scripts/ref_results/suite_result.py
deleted file mode 100644
index 66213391..00000000
--- a/legacy/scripts/ref_results/suite_result.py
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 ZTE Corp and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-import importlib
-import sys
-from qtip.utils import logger_utils
-from os.path import expanduser
-
-logger = logger_utils.QtipLogger('suite_result').get
-
-
-def get_benchmark_result(benchmark_name, suite_name):
- benchmark_indices = importlib.import_module('scripts.ref_results'
- '.{0}_benchmarks_indices'.format(suite_name))
- methodToCall = getattr(benchmark_indices, '{0}_index'.format(benchmark_name))
- return methodToCall()
-
-
-def get_suite_result(suite_name):
- suite_dict = {}
- suite_bench_list = {'compute': ['DPI', 'Dhrystone', 'Whetstone', 'SSL', 'RamSpeed'],
- 'storage': ['FIO'],
- 'network': ['IPERF']}
- temp = 0
- l = len(suite_bench_list[suite_name])
- for benchmark in suite_bench_list[suite_name]:
- try:
- suite_dict[benchmark] = get_benchmark_result(benchmark.lower(), suite_name)
- temp = temp + float(suite_dict[benchmark]['index'])
- except OSError:
- l = l - 1
- pass
-
- if l == 0:
- logger.info("No {0} suite results found".format(suite_name))
- return False
- else:
- suite_index = temp / l
- suite_dict_f = {'index': suite_index,
- 'suite_results': suite_dict}
- result_path = expanduser('~') + '/qtip/results'
- with open('{0}/{1}_result.json'.format(result_path, suite_name), 'w+') as result_json:
- json.dump(suite_dict_f, result_json, indent=4, sort_keys=True)
- return True
-
-
-def main():
- get_suite_result(sys.argv[1])
-
-
-if __name__ == "__main__":
- main()
diff --git a/legacy/tests/__init__.py b/legacy/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/tests/__init__.py
+++ /dev/null
diff --git a/legacy/tests/api/__init__.py b/legacy/tests/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/tests/api/__init__.py
+++ /dev/null
diff --git a/legacy/tests/api/test_server.py b/legacy/tests/api/test_server.py
deleted file mode 100644
index bf316f5d..00000000
--- a/legacy/tests/api/test_server.py
+++ /dev/null
@@ -1,131 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-import time
-
-import mock
-import pytest
-
-import qtip.api.cmd.server as server
-
-
-def setup_module():
- server.add_routers()
-
-
-@pytest.fixture
-def app():
- return server.app
-
-
-@pytest.fixture
-def app_client(app):
- client = app.test_client()
- return client
-
-
-def side_effect_sleep(sleep_time):
- time.sleep(sleep_time)
-
-
-def side_effect_pass():
- pass
-
-
-class TestClass:
- @pytest.mark.parametrize("body, expected", [
- ({'installer_type': 'fuel',
- 'installer_ip': '10.20.0.2'},
- {'job_id': '',
- 'installer_type': 'fuel',
- 'installer_ip': '10.20.0.2',
- 'pod_name': 'default',
- 'suite_name': 'compute',
- 'max_minutes': 60,
- 'type': 'BM',
- 'testdb_url': None,
- 'node_name': None,
- 'state': 'finished',
- 'state_detail': [{'state': 'finished', 'benchmark': 'dhrystone_bm.yaml'},
- {'state': 'finished', 'benchmark': 'whetstone_bm.yaml'},
- {'state': 'finished', 'benchmark': 'ramspeed_bm.yaml'},
- {'state': 'finished', 'benchmark': 'dpi_bm.yaml'},
- {'state': 'finished', 'benchmark': 'ssl_bm.yaml'}],
- 'result': 0}),
- ({'installer_type': 'fuel',
- 'installer_ip': '10.20.0.2',
- 'pod_name': 'default',
- 'max_minutes': 20,
- 'suite_name': 'compute',
- 'type': 'VM',
- 'benchmark_name': 'dhrystone_vm.yaml',
- 'testdb_url': 'http://testresults.opnfv.org/test/api/v1',
- 'node_name': 'zte-pod2'},
- {'job_id': '',
- 'installer_type': 'fuel',
- 'installer_ip': '10.20.0.2',
- 'pod_name': 'default',
- 'suite_name': 'compute',
- 'max_minutes': 20,
- 'type': 'VM',
- 'testdb_url': 'http://testresults.opnfv.org/test/api/v1',
- 'node_name': 'zte-pod2',
- 'state': 'finished',
- 'state_detail': [{u'state': u'finished', u'benchmark': u'dhrystone_vm.yaml'}],
- 'result': 0})
- ])
- @mock.patch('qtip.utils.args_handler.prepare_and_run_benchmark')
- def test_post_get_delete_job_successful(self, mock_args_handler, app_client, body, expected):
- mock_args_handler.return_value = {'result': 0,
- 'detail': {'host': [(u'10.20.6.14', {'unreachable': 0,
- 'skipped': 13,
- 'ok': 27,
- 'changed': 26,
- 'failures': 0}),
- ('localhost', {'unreachable': 0,
- 'skipped': 0,
- 'ok': 6,
- 'changed': 6,
- 'failures': 0}),
- (u'10.20.6.13', {'unreachable': 0,
- 'skipped': 13,
- 'ok': 27,
- 'changed': 26,
- 'failures': 0})]}}
-
- reply = app_client.post("/api/v1.0/jobs", data=body)
- print(reply.data)
- id = json.loads(reply.data)['job_id']
- expected['job_id'] = id
- post_process = ''
- while post_process != 'finished':
- get_reply = app_client.get("/api/v1.0/jobs/%s" % id)
- reply_data = json.loads(get_reply.data)
- post_process = reply_data['state']
- print(reply_data)
- assert len(filter(lambda x: reply_data[x] == expected[x], expected.keys())) == len(expected)
- delete_reply = app_client.delete("/api/v1.0/jobs/%s" % id)
- assert "successful" in delete_reply.data
-
- @pytest.mark.parametrize("body, expected", [
- ([{'installer_type': 'fuel',
- 'installer_ip': '10.20.0.2'},
- {'installer_type': 'compass',
- 'installer_ip': '192.168.20.50'}],
- ['job_id',
- 'It already has one job running now!'])
- ])
- @mock.patch('qtip.utils.args_handler.prepare_and_run_benchmark',
- side_effect=[side_effect_sleep(0.5), side_effect_pass])
-    def test_post_two_jobs_unsuccessful(self, mock_args_handler, app_client, body, expected):
- reply_1 = app_client.post("/api/v1.0/jobs", data=body[0])
- reply_2 = app_client.post("/api/v1.0/jobs", data=body[1])
- assert expected[0] in json.loads(reply_1.data).keys()
- app_client.delete("/api/v1.0/jobs/%s" % json.loads(reply_1.data)['job_id'])
-        assert expected[1] in reply_2.data
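
The GET polling loop in the deleted test above spins until the job reports 'finished', with no upper bound. A bounded variant is sketched below as an illustration only; it assumes the same app_client fixture and /api/v1.0/jobs endpoint, and the helper name and timeout values are hypothetical, not part of the original suite.

    import json
    import time

    def wait_for_job(app_client, job_id, timeout=60, interval=1):
        # Poll the job resource until it reports 'finished' or the timeout expires.
        deadline = time.time() + timeout
        while time.time() < deadline:
            reply = app_client.get("/api/v1.0/jobs/%s" % job_id)
            if json.loads(reply.data)['state'] == 'finished':
                return True
            time.sleep(interval)
        return False
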
diff --git a/legacy/tests/create_zones_test.py b/legacy/tests/create_zones_test.py
deleted file mode 100644
index 1aa37477..00000000
--- a/legacy/tests/create_zones_test.py
+++ /dev/null
@@ -1,118 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import pytest
-import mock
-from mock import Mock, MagicMock
-import os
-from qtip.utils.create_zones import AvailabilityZone
-
-return_list = []
-
-
-def get_agg_mock(host):
- agg = Mock()
- agg.name = host
- agg.id = host
- return agg
-
-
-class HyperMock(MagicMock):
- def list(self):
- mock_hypervisor = [Mock(service={'host': '10.20.0.4'}), Mock(service={'host': '10.20.0.5'})]
- return mock_hypervisor
-
-
-class AggMock(MagicMock):
- def get_details(self, agg_id):
- print "get_details:{0}".format(agg_id)
- return Mock(hosts=[])
-
- def create(self, host, agg):
- print "create:{0}:{1}".format(host, agg)
- return agg
-
- def list(self):
- return return_list
-
- def delete(self, agg_id):
- print "delete:{0}".format(agg_id)
- pass
-
- def add_host(self, aggregate, host):
- print "add_host:{0}:{1}".format(aggregate, host)
- pass
-
- def remove_host(self, agg_id, host):
- print "remove_host:{0}:{1}".format(agg_id, host)
- pass
-
-
-class NovaMock(MagicMock):
- hypervisors = HyperMock()
- aggregates = AggMock()
-
-
-@pytest.mark.xfail(reason="unstable result")
-class TestClass:
- @pytest.mark.parametrize("test_input, expected", [
- (['compute1', 'compute2'],
- ['create:compute1:compute1',
- 'add_host:compute1:10.20.0.4',
- 'create:compute2:compute2',
- 'add_host:compute2:10.20.0.5']),
- (['compute1'],
- ['create:compute1:compute1',
- 'add_host:compute1:10.20.0.4']),
- ])
- @mock.patch('qtip.utils.create_zones.client', autospec=True)
- @mock.patch('qtip.utils.create_zones.v2', autospec=True)
- @mock.patch('qtip.utils.create_zones.session')
- def test_create_zones_success(self, mock_keystone_session, mock_keystone_v2, mock_nova_client, test_input, expected, capfd):
- nova_obj = NovaMock()
- mock_nova_client.Client.return_value = nova_obj()
- k = mock.patch.dict(os.environ, {'OS_AUTH_URL': 'http://172.10.0.5:5000',
- 'OS_USERNAME': 'admin',
- 'OS_PASSWORD': 'admin',
- 'OS_TENANT_NAME': 'admin'})
- k.start()
- azone = AvailabilityZone()
- azone.create_aggs(test_input)
- k.stop()
- resout, reserr = capfd.readouterr()
- for x in expected:
- assert x in resout
-
- @pytest.mark.parametrize("test_input, expected", [
- ([get_agg_mock('10.20.0.4'), get_agg_mock('10.20.0.5')],
- ['get_details:10.20.0.4',
- 'delete:10.20.0.4',
- 'get_details:10.20.0.5',
- 'delete:10.20.0.5']),
- ([],
- []),
- ])
- @mock.patch('qtip.utils.create_zones.client', autospec=True)
- @mock.patch('qtip.utils.create_zones.v2', autospec=True)
- @mock.patch('qtip.utils.create_zones.session')
- def test_clean_all_aggregates(self, mock_keystone_session, mock_keystone_v2, mock_nova_client, test_input, expected, capfd):
- global return_list
- return_list = test_input
- nova_obj = NovaMock()
- mock_nova_client.Client.return_value = nova_obj()
- k = mock.patch.dict(os.environ, {'OS_AUTH_URL': 'http://172.10.0.5:5000',
- 'OS_USERNAME': 'admin',
- 'OS_PASSWORD': 'admin',
- 'OS_TENANT_NAME': 'admin'})
- k.start()
- azone = AvailabilityZone()
- azone.clean_all_aggregates()
- k.stop()
- resout, reserr = capfd.readouterr()
- for x in expected:
- assert x in resout
diff --git a/legacy/tests/functional/__init__.py b/legacy/tests/functional/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/tests/functional/__init__.py
+++ /dev/null
diff --git a/legacy/tests/functional/yaml_schema_test.py b/legacy/tests/functional/yaml_schema_test.py
deleted file mode 100644
index 3c7994a5..00000000
--- a/legacy/tests/functional/yaml_schema_test.py
+++ /dev/null
@@ -1,24 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import os.path
-from pykwalify.core import Core
-
-
-class TestClass:
- def test_schema_success(self):
- for root, dirs, files in os.walk("test_cases"):
- for name in files:
- print root + "/" + name
-                if "_bm" in name:
-                    schema = "tests/schema/test_bm_schema.yaml"
-                elif "_vm" in name:
-                    schema = "tests/schema/test_vm_schema.yaml"
-                else:
-                    continue
-                c = Core(source_file=root + "/" + name, schema_files=[schema])
- c.validate(raise_exception=True)
diff --git a/legacy/tests/helper/perftest.yaml b/legacy/tests/helper/perftest.yaml
deleted file mode 100644
index 57948b62..00000000
--- a/legacy/tests/helper/perftest.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 taseer94@gmail.com and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
- tests:
- - command: ['perftest', 'run']
- output: "Run a perftest\n"
diff --git a/legacy/tests/helper/suite.yaml b/legacy/tests/helper/suite.yaml
deleted file mode 100644
index 84bf9239..00000000
--- a/legacy/tests/helper/suite.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 taseer94@gmail.com and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
- tests:
- - command: ['suite', 'run']
- output: "Run a suite\n"
-
diff --git a/legacy/tests/helper/version.yaml b/legacy/tests/helper/version.yaml
deleted file mode 100644
index 59be4256..00000000
--- a/legacy/tests/helper/version.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 taseer94@gmail.com and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-
- tests:
- - command: ['version', 'list']
- output: "Lists all the different versions\n"
-
- - command: ['version', 'install', 'Colorado']
- output: "Install: Colorado\n"
-
- - command: ['version', 'uninstall', 'Arno']
- output: "Uninstall: Arno\n"
-
diff --git a/legacy/tests/spawn_vm_test.py b/legacy/tests/spawn_vm_test.py
deleted file mode 100644
index ac58db27..00000000
--- a/legacy/tests/spawn_vm_test.py
+++ /dev/null
@@ -1,64 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import pytest
-import mock
-from mock import Mock, MagicMock
-import os
-from qtip.utils.spawn_vm import SpawnVM
-
-
-class KeystoneMock(MagicMock):
- auth_token = Mock()
- v2_0 = Mock()
-
-
-class StackMock(MagicMock):
- status = 'COMPLETE'
- outputs = [{'output_key': 'availability_instance_1',
- 'output_value': 'output_value_1'},
- {'output_key': 'instance_ip_1',
- "output_value": "172.10.0.154"},
- {"output_key": "instance_PIP_1",
- "output_value": "10.10.17.5"}]
-
-
-class HeatMock(MagicMock):
- def list(self):
- return []
-
- def get(self, stackname):
- return StackMock()
-
- def create(self, stack_name, template):
- pass
-
-
-class TestClass:
- @pytest.mark.parametrize("test_input, expected", [
- ({'availability_zone': ['compute1', 'compute1'],
- 'OS_image': ['QTIP_CentOS', 'QTIP_CentOS'],
- 'public_network': ['admin-floating_net', 'admin-floating_net'],
- 'flavor': ['m1.large', 'm1.large'],
- 'role': ['1-server', '2-host']},
- [('172.10.0.154', '')]),
- ])
- @mock.patch('qtip.utils.spawn_vm.Env_setup')
- @mock.patch('qtip.utils.spawn_vm.AvailabilityZone')
- @mock.patch('qtip.utils.spawn_vm.keystoneclient.v2_0', autospec=True)
- @mock.patch('qtip.utils.spawn_vm.heatclient.client', autospec=True)
- def test_create_zones_success(self, mock_heat, mock_keystone,
- mock_zone, mock_setup, test_input, expected):
- open('./config/QtipKey.pub', 'a').close()
- mock_heat.Client.return_value = Mock(stacks=HeatMock())
- k = mock.patch.dict(os.environ, {'INSTALLER_TYPE': 'fuel'})
- k.start()
- SpawnVM(test_input)
- k.stop()
- os.remove('./config/QtipKey.pub')
- mock_setup.ip_pw_list.append.assert_called_with(expected[0])
diff --git a/legacy/utils/__init__.py b/legacy/utils/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/utils/__init__.py
+++ /dev/null
diff --git a/legacy/utils/create_zones.py b/legacy/utils/create_zones.py
deleted file mode 100644
index 5e378c83..00000000
--- a/legacy/utils/create_zones.py
+++ /dev/null
@@ -1,86 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Dell Inc, ZTE and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from keystoneclient.auth.identity import v2
-from keystoneclient import session
-from novaclient import client
-import os
-import random
-import logger_utils
-
-logger = logger_utils.QtipLogger('create_zones').get
-
-
-class AvailabilityZone:
-
- def __init__(self):
- self._keystone_client = None
- self._nova_client = None
-
-    def _get_keystone_client(self):
-        """returns a keystone session used to authenticate the nova client"""
-
-        if self._keystone_client is None:
-            '''
-            self._keystone_client = keystoneclient.v2_0.client.Client(
-                auth_url=os.environ.get('OS_AUTH_URL'),
-                username=os.environ.get('OS_USERNAME'),
-                password=os.environ.get('OS_PASSWORD'),
-                tenant_name=os.environ.get('OS_TENANT_NAME'))
-            '''
-            auth = v2.Password(auth_url=os.environ.get('OS_AUTH_URL'),
-                               username=os.environ.get('OS_USERNAME'),
-                               password=os.environ.get('OS_PASSWORD'),
-                               tenant_name=os.environ.get('OS_TENANT_NAME'))
-
-            # cache the session so repeated calls reuse the same authentication
-            self._keystone_client = session.Session(auth=auth)
-
-        return self._keystone_client
-
- def _get_nova_client(self):
- if self._nova_client is None:
- keystone = self._get_keystone_client()
- self._nova_client = client.Client('2', session=keystone)
- return self._nova_client
-
- def clean_all_aggregates(self):
- logger.info("clean all aggregates")
- nova = self._get_nova_client()
- agg_list = nova.aggregates.list()
-
- for agg in agg_list:
- agg_info = nova.aggregates.get_details(agg.id)
- agg_hosts = agg_info.hosts
- if len(agg_hosts):
- for host in agg_hosts:
- nova.aggregates.remove_host(agg.id, host)
- nova.aggregates.delete(agg.id)
-
- def create_aggs(self, args):
- azone_list = list(set(args))
- azone_list.sort()
-
- nova = self._get_nova_client()
- hyper_list = nova.hypervisors.list()
-
- if len(azone_list) > len(hyper_list):
- logger.error("required available zones > compute nodes")
- return None
-
- compute_nodes = map(lambda x: x.service['host'], hyper_list)
- sample_nodes = random.sample(compute_nodes, len(azone_list))
- sample_nodes.sort()
-
- for index, item in enumerate(azone_list):
- logger.info("create aggregates: %s" % str(item))
- agg_id = nova.aggregates.create(item, item)
-
- logger.info("add host: %s" % sample_nodes[index])
- nova.aggregates.add_host(aggregate=agg_id, host=sample_nodes[index])
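
A typical invocation of the helper above, mirroring the pattern exercised by the deleted unit test earlier in this diff, is sketched below. It assumes the usual OS_* credentials are exported in the environment; the zone names are illustrative.

    from qtip.utils.create_zones import AvailabilityZone

    azone = AvailabilityZone()
    azone.clean_all_aggregates()                 # remove any leftover aggregates first
    azone.create_aggs(['compute1', 'compute2'])  # one aggregate/zone per name, each pinned to one hypervisor
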
diff --git a/legacy/utils/dashboard/__init__.py b/legacy/utils/dashboard/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/utils/dashboard/__init__.py
+++ /dev/null
diff --git a/legacy/utils/dashboard/pushtoDB.py b/legacy/utils/dashboard/pushtoDB.py
deleted file mode 100644
index ce54aebd..00000000
--- a/legacy/utils/dashboard/pushtoDB.py
+++ /dev/null
@@ -1,82 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import requests
-import json
-import datetime
-import os
-import sys
-from qtip.utils import logger_utils
-
-logger = logger_utils.QtipLogger('push_db').get
-
-TEST_DB = 'http://testresults.opnfv.org/test/api/v1'
-
-suite_list = [('compute_result.json', 'compute_test_suite'),
- ('network_result.json', 'network_test_suite'),
- ('storage_result.json', 'storage_test_suite')]
-payload_list = {}
-
-
-def push_results_to_db(db_url, case_name, payload, installer, pod_name):
-
- url = db_url + "/results"
- creation_date = str(datetime.datetime.utcnow().isoformat())
-
- params = {"project_name": "qtip", "case_name": case_name,
- "pod_name": pod_name, "installer": installer, "start_date": creation_date,
- "version": "test", "details": payload}
-
- headers = {'Content-Type': 'application/json'}
-    logger.info('pod_name:{0},installer:{1},creation_date:{2}'.format(pod_name,
- installer,
- creation_date))
-    # temporary code, will be deleted after the Bitergia dashboard is ready
- try:
- qtip_testapi_url = "http://testapi.qtip.openzero.net/results"
- qtip_testapi_r = requests.post(qtip_testapi_url, data=json.dumps(params), headers=headers)
-        logger.info('Pushing Results to qtip_testapi: {0}'.format(qtip_testapi_r))
-    except Exception:
- logger.info("Pushing Results to qtip_testapi Error:{0}".format(sys.exc_info()[0]))
-
- try:
- r = requests.post(url, data=json.dumps(params), headers=headers)
- logger.info(r)
- return True
-    except Exception:
- logger.info("Error:{0}".format(sys.exc_info()[0]))
- return False
-
-
-def populate_payload(suite_list):
-
- global payload_list
- for k, v in suite_list:
-
- if os.path.isfile('results/' + str(k)):
- payload_list[k] = v
-
-
-def main():
-
- global payload_list
- populate_payload(suite_list)
- if payload_list:
- logger.info(payload_list)
- for suite, case in payload_list.items():
- with open('results/' + suite, 'r') as result_file:
- j = json.load(result_file)
- push_results_to_db(TEST_DB, case, j,
- os.environ['INSTALLER_TYPE'],
- os.environ['NODE_NAME'])
-    else:
- logger.info('Results not found')
-
-
-if __name__ == "__main__":
- main()
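
For reference, driving the helper above for a single suite file looks roughly like the sketch below. The file path, suite name, installer and pod values are illustrative; main() normally derives them from suite_list and the environment.

    import json

    with open('results/compute_result.json') as result_file:
        payload = json.load(result_file)
    push_results_to_db(TEST_DB, 'compute_test_suite', payload,
                       installer='fuel', pod_name='zte-pod2')
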
diff --git a/legacy/utils/report/__init__.py b/legacy/utils/report/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/utils/report/__init__.py
+++ /dev/null
diff --git a/legacy/utils/report/get_indices.py b/legacy/utils/report/get_indices.py
deleted file mode 100644
index 42db6584..00000000
--- a/legacy/utils/report/get_indices.py
+++ /dev/null
@@ -1,16 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-
-
-def get_index(suite):
- with open('../../results/' + suite + '.json') as result_file:
- result_djson = json.load(result_file)
- index = result_djson['index']
- return index
diff --git a/legacy/utils/report/get_results.py b/legacy/utils/report/get_results.py
deleted file mode 100644
index 6df88235..00000000
--- a/legacy/utils/report/get_results.py
+++ /dev/null
@@ -1,58 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import json
-
-
-def report_concat(targ_dir, testcase):
- machine_temp = []
- machines = []
-
- for file in os.listdir(targ_dir):
- if file.endswith(".json"):
- machine_temp.append(file)
-
-    for file_t in machine_temp:
-        with open(targ_dir + file_t) as result_file:
-            result_djson = json.load(result_file)
-            if result_djson['1 Testcase Name'] == str(testcase):
-                machines.append(result_djson)
- return machines
-
-
-def space_count(length):
-    return ' ' * length
-
-
-def custom_dict(list1, list2, k):
- string_1 = ''
- for num_1 in range(0, len(list1)):
- string_1 = string_1 + space_count(k) + str(list1[num_1][0]) + "=" + str(list2[num_1]) + "\n"
- return string_1
-
-
-def generate_result(dict_a, k):
- list_1 = []
- list_2 = []
- count = 0
- for i, j in sorted(dict_a.iteritems()):
- list_1.append([])
- list_1[count].append(i)
-        if isinstance(dict_a.get(i), dict):
- list_2.append(str("\n" + generate_result(dict_a.get(i), int(k + 1))))
- else:
- list_2.append(dict_a.get(i))
- count = count + 1
- return custom_dict(list_1, list_2, k)
diff --git a/legacy/utils/report/qtip_graph.py b/legacy/utils/report/qtip_graph.py
deleted file mode 100644
index 68ed660f..00000000
--- a/legacy/utils/report/qtip_graph.py
+++ /dev/null
@@ -1,38 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import matplotlib
-# the non-interactive Agg backend must be selected before pyplot is imported
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import numpy as np
-
-
-def plot_indices(a, b, c):
- N = 3
- ind = np.arange(N)
- y_axis = (a, b, c)
- width = 0.35
- f = plt.figure()
- ax = f.gca()
- ax.set_autoscale_on(True)
-    my_bars = ax.bar(ind, y_axis, width, color='b')
-    ax.set_ylabel('Index Score*')
-    ax.set_xlabel('Suite')
-    ax.set_title('QTIP benchmark scores')
-    ax.axis('on')
- ax.set_xticks(ind + width / 2)
- ax.set_xticklabels(['Compute', 'Storage', 'Network'])
- ax.axis([0, 3, 0, 1.25])
-    f.text(0.7, 0.01, '* With Comparison to Reference POD', fontsize=9)
-
- for rect in my_bars:
- height = rect.get_height()
- ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height, height, ha='center', va='bottom')
- f.savefig('qtip_graph.jpeg')
diff --git a/legacy/utils/report/qtip_report.py b/legacy/utils/report/qtip_report.py
deleted file mode 100644
index 1097df5f..00000000
--- a/legacy/utils/report/qtip_report.py
+++ /dev/null
@@ -1,117 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
-from reportlab.lib.styles import getSampleStyleSheet
-from reportlab.lib.units import inch
-from reportlab.lib.pagesizes import letter
-import qtip_graph as graph
-import get_indices as results
-from get_results import report_concat
-from get_results import generate_result
-
-
-def dump_result(Stor, directory, testcase):
- try:
- lower_s = testcase.lower()
- Stor.append(Paragraph(testcase, Style['h3']))
- l1 = report_concat(directory, lower_s)
- l = 1
- for a in l1:
- Stor.append(Paragraph(testcase + " result_" + str(l), Style['h5']))
- raw_string = generate_result(a, 0)
- replaced_string = raw_string.replace('\n', '<br/> ').replace(' ', '&nbsp;')
- Stor.append(Paragraph(replaced_string, Style['BodyText']))
- l = l + 1
- except OSError:
- print "Results for {0} not found".format(testcase)
-
-
-doc = SimpleDocTemplate("../../results/QTIP_results.pdf", pagesize=letter,
- rightMargin=72, leftMargin=72,
- topMargin=72, bottomMargin=18)
-Stor = []
-Style = getSampleStyleSheet()
-Title = "QTIP Benchmark Suite"
-Stor.append(Paragraph(Title, Style['Title']))
-H1 = "Results"
-Stor.append(Spacer(0, 36))
-Stor.append(Paragraph(H1, Style['h2']))
-compute = 0
-storage = 0
-network = 0
-try:
- compute = results.get_index('compute_result')
-except IOError:
- pass
-
-try:
- storage = results.get_index('storage_result')
-except IOError:
- pass
-try:
- network = results.get_index('network_result')
-except IOError:
- pass
-
-Stor.append(Paragraph("Compute Suite: %f" % compute, Style['h5']))
-Stor.append(Paragraph("Storage Suite: %f" % storage, Style['h5']))
-Stor.append(Paragraph("Network Suite: %f" % network, Style['h5']))
-graph.plot_indices(compute, storage, network)
-qtip_graph = ('qtip_graph.jpeg')
-im = Image(qtip_graph, 5 * inch, 4 * inch)
-Stor.append(im)
-Stor.append(Spacer(0, 12))
-Stor.append(Paragraph("Reference POD", Style['h5']))
-ptext = "The Dell OPNFV Lab POD3 was used as the reference POD against which the reference results were collected. The POD consists of 6 identical servers, each with the following specification:"
-Stor.append(Paragraph(ptext, Style['Normal']))
-ptext = "<bullet>&bull;</bullet>Server Type: Dell PowerEdge R630 Server"
-Stor.append(Paragraph(ptext, Style['Bullet']))
-ptext = "<bullet>&bull;</bullet>CPU: Intel Xeon E5-2698 @ 2300 MHz"
-Stor.append(Paragraph(ptext, Style["Bullet"]))
-ptext = "<bullet>&bull;</bullet>RAM: 128GB"
-Stor.append(Paragraph(ptext, Style["Bullet"]))
-ptext = "<bullet>&bull;</bullet>Storage SSD: 420GB"
-Stor.append(Paragraph(ptext, Style["Bullet"]))
-ptext = "<bullet>&bull;</bullet>Network Card: Intel 2P X520/2P I350 rNDC"
-Stor.append(Paragraph(ptext, Style["Bullet"]))
-ptext = "<bullet>&bull;</bullet>Servers interconnected through a Dell S4810 switch using a 10Gbps physical link"
-Stor.append(Paragraph(ptext, Style["Bullet"]))
-Stor.append(Spacer(0, 12))
-ptext = "For Further Details of the Reference POD hardware, please visit: https://wiki.opnfv.org/reference_pod_hardware_details"
-Stor.append(Paragraph(ptext, Style['Normal']))
-Stor.append(Spacer(0, 12))
-ptext = "For Details of the Reference POD Results, please visit: https://wiki.opnfv.org/reference_pod_qtip_results"
-Stor.append(Spacer(0, 12))
-Stor.append(Paragraph(ptext, Style['Normal']))
-Stor.append(Paragraph("RAW Results", Style['h1']))
-Stor.append(Paragraph("Compute Results", Style['h2']))
-
-dump_result(Stor, "../../results/dhrystone/", "Dhrystone_bm")
-dump_result(Stor, "../../results/dhrystone/", "Dhrystone_vm")
-
-dump_result(Stor, "../../results/whetstone/", "Whetstone_bm")
-dump_result(Stor, "../../results/whetstone/", "Whetstone_vm")
-
-dump_result(Stor, "../../results/ramspeed/", "Ramspeed_bm")
-dump_result(Stor, "../../results/ramspeed/", "Ramspeed_vm")
-
-dump_result(Stor, "../../results/ssl/", "SSL_bm")
-dump_result(Stor, "../../results/ssl/", "SSL_vm")
-
-Stor.append(Paragraph("Network Results", Style['h2']))
-dump_result(Stor, "../../results/iperf/", "IPERF_bm")
-dump_result(Stor, "../../results/iperf/", "IPERF_vm")
-dump_result(Stor, "../../results/iperf/", "IPERF_vm_2")
-
-Stor.append(Paragraph("Storage Results", Style['h2']))
-dump_result(Stor, "../../results/fio/", "fio_bm")
-dump_result(Stor, "../../results/fio/", "fio_vm")
-
-
-doc.build(Stor)
diff --git a/legacy/utils/spawn_vm.py b/legacy/utils/spawn_vm.py
deleted file mode 100644
index f38c9a3a..00000000
--- a/legacy/utils/spawn_vm.py
+++ /dev/null
@@ -1,206 +0,0 @@
-##############################################################################
-# Copyright (c) 2016 Dell Inc, ZTE and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import os
-import sys
-import yaml
-import heatclient.client
-import keystoneclient
-import time
-from env_setup import Env_setup
-from create_zones import AvailabilityZone
-import logger_utils
-
-logger = logger_utils.QtipLogger('spawn_vm').get
-
-
-class SpawnVM(Env_setup):
-
- def __init__(self, vm_info):
- logger.info('vm_info: %s' % vm_info)
- vm_role_ip_dict = vm_info.copy()
- self._keystone_client = None
- self._heat_client = None
- self._glance_client = None
- self._nova_client = None
- self.azone = AvailabilityZone()
- # TODO: it should clean up aggregates and stack after test case finished.
- self.azone.clean_all_aggregates()
- self.azone.create_aggs(vm_info['availability_zone'])
- self.heat_template = self.generate_heat_template(vm_info)
- self.create_stack(vm_role_ip_dict)
-
- @staticmethod
- def get_public_network():
-
- """
- TODO: GET THE NAMES OF THE PUBLIC NETWORKS for OTHER PROJECTS
- """
- installer = os.environ['INSTALLER_TYPE']
-
- if installer.lower() == 'fuel':
- return 'admin_floating_net'
- if installer.lower() == 'apex':
- return 'external'
- if installer.lower() == 'compass':
- return 'ext-net'
- if installer.lower() == 'joid':
- return 'ext-net'
-
- def generate_heat_template(self, vm_params):
- logger.info('Generating Heat Template')
- heat_dict = {}
- try:
- with open('./config/SampleHeat.yaml', 'r+') as H_temp:
- heat_dict = yaml.safe_load(H_temp)
- except yaml.YAMLError as exc:
- if hasattr(exc, 'problem_mark'):
- mark = exc.problem_mark
- logger.error(
- 'Error in qtip/config/SampleHeat.yaml at: (%s,%s)' % (mark.line + 1,
- mark.column + 1))
- logger.error('EXITING PROGRAM. Correct File and restart')
- sys.exit(1)
-
-        with open('./config/QtipKey.pub', 'r') as fopen:
-            fopenstr = fopen.read().rstrip()
- scriptcmd = '#!/bin/bash \n echo {0} >> foo.txt \n echo {1} >> /root/.ssh/authorized_keys'.format(
- fopenstr, fopenstr)
-
- netName = self.get_public_network()
- heat_dict['heat_template_version'] = '2015-04-30'
-
- heat_dict['parameters']['public_network'] = {
- 'type': 'string',
- 'default': netName
- }
-
- for x in range(1, len(vm_params['availability_zone']) + 1):
- avail_zone = vm_params['availability_zone'][x - 1]
-
- heat_dict['parameters']['availability_zone_' + str(x)] = \
- {'description': 'Availability Zone of the instance',
- 'default': avail_zone,
- 'type': 'string'}
-
- heat_dict['resources']['public_port_' + str(x)] = \
- {'type': 'OS::Neutron::Port',
- 'properties': {'network': {'get_resource': 'network'},
- 'security_groups': [{'get_resource': 'security_group'}],
- 'fixed_ips': [{'subnet_id': {'get_resource': 'subnet'}}]}}
-
- heat_dict['resources']['floating_ip_' + str(x)] = {
- 'type': 'OS::Neutron::FloatingIP',
- 'properties': {'floating_network': {'get_param': 'external_net_name'}}}
-
- heat_dict['resources']['floating_ip_assoc_' + str(x)] = {
- 'type': 'OS::Neutron::FloatingIPAssociation',
- 'properties': {
- 'floatingip_id': {'get_resource': 'floating_ip_' + str(x)},
- 'port_id': {'get_resource': 'public_port_' + str(x)}}}
-
- heat_dict['resources']['my_instance_' + str(x)] = \
- {'type': 'OS::Nova::Server',
- 'properties': {'image': {'get_param': 'image'},
- 'networks':
- [{'port': {'get_resource': 'public_port_' + str(x)}}],
- 'flavor': {'get_resource': 'flavor'},
- 'availability_zone': avail_zone,
- 'security_groups': [{'get_resource': 'security_group'}],
- 'name': 'instance' + str(x),
- 'user_data_format': 'RAW',
- 'user_data': scriptcmd}}
-
- heat_dict['outputs']['instance_PIP_' + str(x)] = {
- 'description': 'IP address of the instance',
- 'value': {'get_attr': ['my_instance_' + str(x), 'first_address']}}
-
- heat_dict['outputs']['instance_ip_' + str(x)] = {
- 'description': 'IP address of the instance',
- 'value': {'get_attr': ['floating_ip_' + str(x), 'floating_ip_address']}}
-
- heat_dict['outputs']['availability_instance_' + str(x)] = {
- 'description': 'Availability Zone of the Instance',
- 'value': {'get_param': 'availability_zone_' + str(x)}}
-
- del heat_dict['outputs']['description']
- logger.info(heat_dict)
-
- return heat_dict
-
- def _get_keystone_client(self):
- """returns a keystone client instance"""
-
- if self._keystone_client is None:
- self._keystone_client = keystoneclient.v2_0.client.Client(
- auth_url=os.environ.get('OS_AUTH_URL'),
- username=os.environ.get('OS_USERNAME'),
- password=os.environ.get('OS_PASSWORD'),
- tenant_name=os.environ.get('OS_TENANT_NAME'))
- return self._keystone_client
-
- def _get_heat_client(self):
- """returns a heat client instance"""
- if self._heat_client is None:
- keystone = self._get_keystone_client()
- heat_endpoint = keystone.service_catalog.url_for(
- service_type='orchestration')
- self._heat_client = heatclient.client.Client(
- '1', endpoint=heat_endpoint, token=keystone.auth_token)
- return self._heat_client
-
- def create_stack(self, vm_role_ip_dict):
- stackname = 'QTIP'
- heat = self._get_heat_client()
-
- self.delete_stack(stackname)
-
- logger.info('Start to create stack %s' % stackname)
- heat.stacks.create(stack_name=stackname, template=self.heat_template)
-
- stack_status = "IN_PROGRESS"
- while stack_status != 'COMPLETE':
- if stack_status == 'IN_PROGRESS':
- logger.debug('Create in Progress')
- if stack_status == 'CREATE_FAILED':
- raise RuntimeError("Stack %s created failed!" % stackname)
- stack_status = heat.stacks.get(stackname).status
- time.sleep(15)
- logger.info('Stack %s Created Complete!' % stackname)
-
- stack_outputs = heat.stacks.get(stackname).outputs
-
- for vm in range(len(vm_role_ip_dict['OS_image'])):
- for i in stack_outputs:
- instanceKey = "instance_ip_" + str(vm + 1)
- privateIPkey = 'instance_PIP_' + str(vm + 1)
- if i['output_key'] == instanceKey:
- Env_setup.roles_dict[vm_role_ip_dict['role'][vm]] \
- .append(str(i['output_value']))
- Env_setup.ip_pw_list.append((str(i['output_value']), ''))
-
- if i['output_key'] == privateIPkey:
- Env_setup.ip_pw_dict[vm_role_ip_dict['role'][vm]] = str(i['output_value'])
-
- logger.info('Getting Public IP(s): %s' % Env_setup.ip_pw_list)
-
- def delete_stack(self, stack_name):
- heat = self._get_heat_client()
-
- stacks = heat.stacks.list()
- exists = map(lambda x: x.stack_name, stacks)
- if stack_name in exists:
- logger.info("Delete stack %s" % stack_name)
- heat.stacks.delete(stack_name)
- while stack_name in exists:
- time.sleep(10)
- stacks = heat.stacks.list()
- exists = map(lambda x: x.stack_name, stacks)
- logger.debug("exists_stacks: %s" % exists)
- logger.info("%s doesn't exist" % stack_name)
diff --git a/legacy/utils/transform/__init__.py b/legacy/utils/transform/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/legacy/utils/transform/__init__.py
+++ /dev/null
diff --git a/legacy/utils/transform/fio_transform.py b/legacy/utils/transform/fio_transform.py
deleted file mode 100644
index e8de2f9a..00000000
--- a/legacy/utils/transform/fio_transform.py
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-import pickle
-import os
-import datetime
-
-
-def get_fio_job_result(fio_job_data):
- return {'read': {'io_bytes': fio_job_data["read"]["io_bytes"],
- 'io_ps': fio_job_data["read"]["iops"],
- 'io_runtime_millisec': fio_job_data["read"]["runtime"],
- 'mean_io_latenchy_microsec': fio_job_data["read"]["lat"]["mean"]},
- 'write': {'io_bytes': fio_job_data["write"]["io_bytes"],
- 'io_ps': fio_job_data["write"]["iops"],
- 'io_runtime_millisec': fio_job_data["write"]["runtime"],
- 'mean_io_latenchy_microsec': fio_job_data["write"]["lat"]["mean"]}}
-
-
-with open("fio_result.json") as fio_raw:
- fio_data = json.load(fio_raw)
-
-fio_result_dict = {}
-for x, result in enumerate(map(get_fio_job_result, fio_data["jobs"])):
- fio_result_dict['job_{0}'.format(x)] = result
-
-host_name = (os.popen("hostname").read().rstrip())
-report_time = str(datetime.datetime.utcnow().isoformat())
-os.system("mv fio_result.json " + str(host_name) + "-" + report_time + ".log")
-with open('./result_temp', 'w+') as out_fio_result:
- pickle.dump(fio_result_dict, out_fio_result)
diff --git a/legacy/utils/transform/iperf_transform.py b/legacy/utils/transform/iperf_transform.py
deleted file mode 100644
index c5eef6f5..00000000
--- a/legacy/utils/transform/iperf_transform.py
+++ /dev/null
@@ -1,35 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 ZTE Corporation and others.
-#
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-import json
-import datetime
-import pickle
-with open('iperf_raw.json', 'r') as ifile:
- raw_iperf_data = json.loads(ifile.read().rstrip())
-
-bits_sent = raw_iperf_data['end']['sum_sent']['bits_per_second']
-bits_received = raw_iperf_data['end']['sum_received']['bits_per_second']
-total_byte_sent = raw_iperf_data['end']['sum_sent']['bytes']
-total_byte_received = raw_iperf_data['end']['sum_received']['bytes']
-cpu_host_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['host_total']
-cpu_remote_total_percent = raw_iperf_data['end']['cpu_utilization_percent']['remote_total']
-
-time_stamp = str(datetime.datetime.utcnow().isoformat())
-
-result = {'version': raw_iperf_data['start']['version'],
- 'bandwidth': {'sender_throughput': bits_sent,
- 'received_throughput': bits_received},
- 'cpu': {'cpu_host': cpu_host_total_percent,
- 'cpu_remote': cpu_remote_total_percent}
- }
-
-with open('iperf_raw-' + time_stamp + '.log', 'w+') as ofile:
- ofile.write(json.dumps(raw_iperf_data))
-
-with open('./result_temp', 'w+') as result_file:
- pickle.dump(result, result_file)
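
If the pickled ./result_temp written above needs to be inspected, it can be read back as sketched here; this assumes the same working directory and Python 2, matching the module.

    import pickle

    with open('./result_temp') as result_file:
        iperf_result = pickle.load(result_file)
    print iperf_result['bandwidth']['sender_throughput']
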