author    jose.lausuch <jose.lausuch@ericsson.com>  2016-04-20 16:03:44 +0200
committer jose.lausuch <jose.lausuch@ericsson.com>  2016-04-21 00:01:59 +0200
commit    ef62824e0471d07a4a3a40c401fc433070d961c6
tree      6d1bd61e7d49446ba1b1a52bb9978e1c5b88173c /testcases/vIMS/CI
parent    3a85a34474a9d7a9384f22bce35e7b81177830e3
Fix Flake8 Violations in the Functest scripts
JIRA: FUNCTEST-213
Change-Id: I66c02dd6ff12ffb9798ebe44a4cfe7bfc73e76c3
Signed-off-by: jose.lausuch <jose.lausuch@ericsson.com>
Diffstat (limited to 'testcases/vIMS/CI')
-rw-r--r--  testcases/vIMS/CI/clearwater.py    11
-rw-r--r--  testcases/vIMS/CI/orchestrator.py  45
-rw-r--r--  testcases/vIMS/CI/vIMS.py          76
3 files changed, 82 insertions, 50 deletions
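
The patch below cleans up flake8 violations in the three files listed above: unused imports, over-long lines, and a few undefined-name references (logger vs. self.logger, manager_up vs. self.manager_up). As an illustrative check only, not part of the commit, the touched files could be re-verified with a small Python wrapper around the flake8 command line, assuming flake8 is installed and the check runs from the repository root:

# Hypothetical verification sketch; the flake8 CLI is the only assumed tool.
import subprocess

FILES = [
    "testcases/vIMS/CI/clearwater.py",
    "testcases/vIMS/CI/orchestrator.py",
    "testcases/vIMS/CI/vIMS.py",
]

# flake8 exits with a non-zero status if any violation remains.
rc = subprocess.call(["flake8"] + FILES)
print("flake8 clean" if rc == 0 else "flake8 reported violations")
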
diff --git a/testcases/vIMS/CI/clearwater.py b/testcases/vIMS/CI/clearwater.py
index 364de68f4..7236f4fba 100644
--- a/testcases/vIMS/CI/clearwater.py
+++ b/testcases/vIMS/CI/clearwater.py
@@ -10,8 +10,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
########################################################################
-import sys
-import subprocess
class clearwater:
@@ -40,7 +38,8 @@ class clearwater:
def set_public_domain(self, public_domain):
self.config['public_domain'] = public_domain
- def deploy_vnf(self, blueprint, bp_name='clearwater', dep_name='clearwater-opnfv'):
+ def deploy_vnf(self, blueprint, bp_name='clearwater',
+ dep_name='clearwater-opnfv'):
if self.orchestrator:
self.dep_name = dep_name
error = self.orchestrator.download_upload_and_deploy_blueprint(
@@ -52,7 +51,7 @@ class clearwater:
else:
if self.logger:
- logger.error("Cloudify manager is down or not provide...")
+ self.logger.error("Cloudify manager is down or not provide...")
def undeploy_vnf(self):
if self.orchestrator:
@@ -61,7 +60,7 @@ class clearwater:
self.orchestrator.undeploy_deployment(self.dep_name)
else:
if self.logger:
- logger.error("Clearwater isn't already deploy...")
+ self.logger.error("Clearwater isn't already deploy...")
else:
if self.logger:
- logger.error("Cloudify manager is down or not provide...")
+ self.logger.error("Cloudify manager is down or not provide...")
diff --git a/testcases/vIMS/CI/orchestrator.py b/testcases/vIMS/CI/orchestrator.py
index 965c2646d..7dbbda716 100644
--- a/testcases/vIMS/CI/orchestrator.py
+++ b/testcases/vIMS/CI/orchestrator.py
@@ -10,7 +10,6 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
########################################################################
-import sys
import subprocess
import os
import shutil
@@ -56,17 +55,21 @@ class orchestrator:
def set_logger(self, logger):
self.logger = logger
- def download_manager_blueprint(self, manager_blueprint_url, manager_blueprint_branch):
+ def download_manager_blueprint(self, manager_blueprint_url,
+ manager_blueprint_branch):
if self.manager_blueprint:
if self.logger:
self.logger.info(
- "cloudify manager server blueprint is already downloaded !")
+ "cloudify manager server blueprint is "
+ "already downloaded !")
else:
if self.logger:
self.logger.info(
"Downloading the cloudify manager server blueprint")
download_result = download_blueprints(
- manager_blueprint_url, manager_blueprint_branch, self.blueprint_dir)
+ manager_blueprint_url,
+ manager_blueprint_branch,
+ self.blueprint_dir)
if not download_result:
if self.logger:
@@ -76,7 +79,7 @@ class orchestrator:
self.manager_blueprint = True
def manager_up(self):
- return manager_up
+ return self.manager_up
def deploy_manager(self):
if self.manager_blueprint:
@@ -98,13 +101,16 @@ class orchestrator:
if self.logger:
self.logger.info("Launching the cloudify-manager deployment")
script = "set -e; "
- script += "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
+ script += "source " + self.testcase_dir + \
+ "venv_cloudify/bin/activate; "
script += "cd " + self.testcase_dir + "; "
script += "cfy init -r; "
script += "cd cloudify-manager-blueprint; "
- script += "cfy local create-requirements -o requirements.txt -p openstack-manager-blueprint.yaml; "
+ script += "cfy local create-requirements -o requirements.txt " + \
+ "-p openstack-manager-blueprint.yaml; "
script += "pip install -r requirements.txt; "
- script += "timeout 1800 cfy bootstrap --install-plugins -p openstack-manager-blueprint.yaml -i inputs.yaml; "
+ script += "timeout 1800 cfy bootstrap --install-plugins " + \
+ "-p openstack-manager-blueprint.yaml -i inputs.yaml; "
cmd = "/bin/bash -c '" + script + "'"
error = execute_command(cmd, self.logger)
if error:
@@ -131,23 +137,30 @@ class orchestrator:
self.logger.info(
"Cloudify-manager server has been successfully removed!")
- def download_upload_and_deploy_blueprint(self, blueprint, config, bp_name, dep_name):
+ def download_upload_and_deploy_blueprint(self, blueprint, config,
+ bp_name, dep_name):
if self.logger:
self.logger.info("Downloading the {0} blueprint".format(
blueprint['file_name']))
- download_result = download_blueprints(blueprint['url'], blueprint['branch'],
- self.testcase_dir + blueprint['destination_folder'])
+ download_result = download_blueprints(blueprint['url'],
+ blueprint['branch'],
+ self.testcase_dir +
+ blueprint['destination_folder'])
if not download_result:
if self.logger:
self.logger.error(
- "Failed to download blueprint {0}".format(blueprint['file_name']))
+ "Failed to download blueprint {0}".
+ format(blueprint['file_name']))
exit(-1)
if self.logger:
self.logger.info("Writing the inputs file")
- with open(self.testcase_dir + blueprint['destination_folder'] + "/inputs.yaml", "w") as f:
+
+ with open(self.testcase_dir + blueprint['destination_folder'] +
+ "/inputs.yaml", "w") as f:
f.write(yaml.dump(config, default_style='"'))
+
f.close()
if self.logger:
@@ -159,7 +172,8 @@ class orchestrator:
bp_name + " -p openstack-blueprint.yaml; "
script += "cfy deployments create -b " + bp_name + \
" -d " + dep_name + " --inputs inputs.yaml; "
- script += "cfy executions start -w install -d " + dep_name + " --timeout 1800; "
+ script += "cfy executions start -w install -d " \
+ + dep_name + " --timeout 1800; "
cmd = "/bin/bash -c '" + script + "'"
error = execute_command(cmd, self.logger)
@@ -173,7 +187,8 @@ class orchestrator:
self.logger.info("Launching the {0} undeployment".format(dep_name))
script = "source " + self.testcase_dir + "venv_cloudify/bin/activate; "
script += "cd " + self.testcase_dir + "; "
- script += "cfy executions start -w uninstall -d " + dep_name + " --timeout 1800 ; "
+ script += "cfy executions start -w uninstall -d " + dep_name \
+ + " --timeout 1800 ; "
script += "cfy deployments delete -d " + dep_name + "; "
cmd = "/bin/bash -c '" + script + "'"
diff --git a/testcases/vIMS/CI/vIMS.py b/testcases/vIMS/CI/vIMS.py
index 3eef5b381..2518855cd 100644
--- a/testcases/vIMS/CI/vIMS.py
+++ b/testcases/vIMS/CI/vIMS.py
@@ -1,4 +1,4 @@
- #!/usr/bin/python
+#!/usr/bin/python
# coding: utf8
#######################################################################
#
@@ -27,14 +27,14 @@ import glanceclient.client as glclient
import novaclient.client as nvclient
from neutronclient.v2_0 import client as ntclient
-from orchestrator import *
-from clearwater import *
+import orchestrator
+import clearwater
pp = pprint.PrettyPrinter(indent=4)
parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
parser.add_argument("-r", "--report",
help="Create json result file",
action="store_true")
@@ -127,7 +127,7 @@ def step_failure(step_name, error_msg):
set_result(step_name, 0, error_msg)
status = "failed"
if step_name == "sig_test":
- status = "passed"
+ status = "passed"
push_results(status)
exit(-1)
@@ -167,10 +167,12 @@ def test_clearwater():
mgr_ip = os.popen(cmd).read()
mgr_ip = mgr_ip.splitlines()[0]
except:
- step_failure("sig_test", "Unable to retrieve the IP of the cloudify manager server !")
+ step_failure("sig_test", "Unable to retrieve the IP of the "
+ "cloudify manager server !")
api_url = "http://" + mgr_ip + "/api/v2"
- dep_outputs = requests.get(api_url + "/deployments/" + CW_DEPLOYMENT_NAME + "/outputs")
+ dep_outputs = requests.get(api_url + "/deployments/" +
+ CW_DEPLOYMENT_NAME + "/outputs")
dns_ip = dep_outputs.json()['outputs']['dns_ip']
ellis_ip = dep_outputs.json()['outputs']['ellis_ip']
@@ -186,7 +188,7 @@ def test_clearwater():
i = 20
while rq.status_code != 201 and i > 0:
rq = requests.post(url, data=params)
- i = i-1
+ i = i - 1
time.sleep(10)
if rq.status_code == 201:
@@ -200,11 +202,12 @@ def test_clearwater():
i = 24
while rq.status_code != 200 and i > 0:
rq = requests.post(url, cookies=cookies)
- i = i-1
+ i = i - 1
time.sleep(25)
if rq.status_code != 200:
- step_failure("sig_test", "Unable to create a number: %s" % rq.json()['reason'])
+ step_failure("sig_test", "Unable to create a number: %s"
+ % rq.json()['reason'])
start_time_ts = time.time()
end_time_ts = start_time_ts
@@ -217,7 +220,8 @@ def test_clearwater():
resolvconf += "\nnameserver " + ns
if dns_ip != "":
- script = 'echo -e "nameserver ' + dns_ip + resolvconf + '" > /etc/resolv.conf; '
+ script = 'echo -e "nameserver ' + dns_ip + resolvconf + \
+ '" > /etc/resolv.conf; '
script += 'source /etc/profile.d/rvm.sh; '
script += 'cd ' + VIMS_TEST_DIR + '; '
script += 'rake test[' + \
@@ -226,8 +230,8 @@ def test_clearwater():
cmd = "/bin/bash -c '" + script + "'"
output_file = "output.txt"
f = open(output_file, 'w+')
- p = subprocess.call(cmd, shell=True, stdout=f,
- stderr=subprocess.STDOUT)
+ subprocess.call(cmd, shell=True, stdout=f,
+ stderr=subprocess.STDOUT)
f.close()
end_time_ts = time.time()
duration = round(end_time_ts - start_time_ts, 1)
@@ -253,7 +257,8 @@ def test_clearwater():
# - VNF deployed
status = "failed"
try:
- if RESULTS['orchestrator']['duration'] > 0 and RESULTS['vIMS']['duration'] > 0:
+ if (RESULTS['orchestrator']['duration'] > 0 and
+ RESULTS['vIMS']['duration'] > 0):
status = "passed"
except:
logger.error("Unable to set test status")
@@ -299,7 +304,8 @@ def main():
if role_id == '':
logger.error("Error : Failed to get id for %s role" % role_name)
- if not openstack_utils.add_role_user(keystone, user_id, role_id, tenant_id):
+ if not openstack_utils.add_role_user(keystone, user_id,
+ role_id, tenant_id):
logger.error("Error : Failed to add %s on tenant" %
ks_creds['username'])
@@ -324,8 +330,9 @@ def main():
})
logger.info("Upload some OS images if it doesn't exist")
- glance_endpoint = keystone.service_catalog.url_for(service_type='image',
- endpoint_type='publicURL')
+ glance_endpoint = keystone.\
+ service_catalog.url_for(service_type='image',
+ endpoint_type='publicURL')
glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)
for img in IMAGES.keys():
@@ -335,14 +342,16 @@ def main():
image_id = openstack_utils.get_image_id(glance, image_name)
if image_id == '':
- logger.info("""%s image doesn't exist on glance repository.
- Try downloading this image and upload on glance !""" % image_name)
+ logger.info("""%s image doesn't exist on glance repository. Try
+ downloading this image and upload on glance !""" % image_name)
image_id = download_and_add_image_on_glance(
glance, image_name, image_url)
if image_id == '':
step_failure(
- "init", "Error : Failed to find or upload required OS image for this deployment")
+ "init",
+ "Error : Failed to find or upload required OS "
+ "image for this deployment")
nova = nvclient.Client("2", **nv_creds)
@@ -350,7 +359,8 @@ def main():
neutron = ntclient.Client(**nt_creds)
if not openstack_utils.update_sg_quota(neutron, tenant_id, 50, 100):
step_failure(
- "init", "Failed to update security group quota for tenant " + TENANT_NAME)
+ "init",
+ "Failed to update security group quota for tenant " + TENANT_NAME)
logger.info("Update cinder quota for this tenant")
from cinderclient import client as cinderclient
@@ -361,7 +371,8 @@ def main():
creds_cinder['project_id'],
creds_cinder['auth_url'],
service_type="volume")
- if not openstack_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150):
+ if not openstack_utils.update_cinder_quota(cinder_client, tenant_id,
+ 20, 10, 150):
step_failure(
"init", "Failed to update cinder quota for tenant " + TENANT_NAME)
@@ -370,7 +381,8 @@ def main():
cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger)
cfy.set_credentials(username=ks_creds['username'], password=ks_creds[
- 'password'], tenant_name=ks_creds['tenant_name'], auth_url=ks_creds['auth_url'])
+ 'password'], tenant_name=ks_creds['tenant_name'],
+ auth_url=ks_creds['auth_url'])
logger.info("Collect flavor id for cloudify manager server")
nova = nvclient.Client("2", **nv_creds)
@@ -384,8 +396,10 @@ def main():
if flavor_id == '':
logger.error(
- "Failed to find %s flavor. Try with ram range default requirement !" % flavor_name)
- flavor_id = openstack_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
+ "Failed to find %s flavor. "
+ "Try with ram range default requirement !" % flavor_name)
+ flavor_id = openstack_utils.\
+ get_flavor_id_by_ram_range(nova, 4000, 8196)
if flavor_id == '':
step_failure("orchestrator",
@@ -402,7 +416,8 @@ def main():
if image_id == '':
step_failure(
- "orchestrator", "Error : Failed to find required OS image for cloudify manager")
+ "orchestrator",
+ "Error : Failed to find required OS image for cloudify manager")
cfy.set_image_id(image_id)
@@ -458,8 +473,10 @@ def main():
if flavor_id == '':
logger.error(
- "Failed to find %s flavor. Try with ram range default requirement !" % flavor_name)
- flavor_id = openstack_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
+ "Failed to find %s flavor. Try with ram range "
+ "default requirement !" % flavor_name)
+ flavor_id = openstack_utils.\
+ get_flavor_id_by_ram_range(nova, 4000, 8196)
if flavor_id == '':
step_failure(
@@ -476,7 +493,8 @@ def main():
if image_id == '':
step_failure(
- "vIMS", "Error : Failed to find required OS image for cloudify manager")
+ "vIMS",
+ "Error : Failed to find required OS image for cloudify manager")
cw.set_image_id(image_id)