Diffstat (limited to 'testcases/vPing/CI/libraries/vPing.py')
-rw-r--r--  testcases/vPing/CI/libraries/vPing.py  587
1 file changed, 396 insertions(+), 191 deletions(-)
diff --git a/testcases/vPing/CI/libraries/vPing.py b/testcases/vPing/CI/libraries/vPing.py
index b81ebb881..be0d2341a 100644
--- a/testcases/vPing/CI/libraries/vPing.py
+++ b/testcases/vPing/CI/libraries/vPing.py
@@ -1,277 +1,482 @@
#!/usr/bin/python
#
-# Copyright (c) 2015 All rights reserved. This program and the accompanying materials
+# Copyright (c) 2015 All rights reserved
+# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# This script boots the VM1 and allocates IP address from Nova
+# 0.1: This script boots the VM1 and allocates IP address from Nova
# Later, the VM2 boots then execute cloud-init to ping VM1.
# After successful ping, both the VMs are deleted.
+# 0.2: measure test duration and publish results under json format
#
-# Note: this is script works only with Ubuntu image, not with Cirros image
#
-import os, time, subprocess, logging, argparse, yaml
+import os
+import time
+import argparse
import pprint
-import novaclient.v2.client as novaclient
-import neutronclient.client as neutronclient
-#import novaclient.v1_1.client as novaclient
-import cinderclient.v1.client as cinderclient
-pp = pprint.PrettyPrinter(indent=4)
+import sys
+import logging
+import yaml
+import datetime
+from novaclient import client as novaclient
+from neutronclient.v2_0 import client as neutronclient
+from keystoneclient.v2_0 import client as keystoneclient
+from glanceclient import client as glanceclient
-EXIT_CODE = -1
-HOME = os.environ['HOME']+"/"
-with open(HOME+'.functest/functest.yaml') as f:
- functest_yaml = yaml.safe_load(f)
-f.close()
+pp = pprint.PrettyPrinter(indent=4)
-PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
-NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
-NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
-GLANCE_IMAGE_NAME = functest_yaml.get("general").get("openstack").get("image_name")
-NEUTRON_NET_NAME = functest_yaml.get("general").get("openstack").get("neutron_net_name")
-FLAVOR = functest_yaml.get("vping").get("vm_flavor")
+parser = argparse.ArgumentParser()
+parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
+parser.add_argument("-r", "--report",
+ help="Create json result file",
+ action="store_true")
-parser = argparse.ArgumentParser()
-parser.add_argument("-d", "--debug", help="Debug mode", action="store_true")
args = parser.parse_args()
""" logging configuration """
+
logger = logging.getLogger('vPing')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
+
if args.debug:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
-formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+
+formatter = logging.Formatter('%(asctime)s - %(name)s'
+                              ' - %(levelname)s - %(message)s')
+
ch.setFormatter(formatter)
logger.addHandler(ch)
+REPO_PATH = os.environ['repos_dir'] + '/functest/'
+if not os.path.exists(REPO_PATH):
+ logger.error("Functest repository directory not found '%s'" % REPO_PATH)
+ exit(-1)
+sys.path.append(REPO_PATH + "testcases/")
+import functest_utils
+
+with open("/home/opnfv/functest/conf/config_functest.yaml") as f:
+ functest_yaml = yaml.safe_load(f)
+
+HOME = os.environ['HOME'] + "/"
+# vPing parameters
+VM_BOOT_TIMEOUT = 180
+VM_DELETE_TIMEOUT = 100
+PING_TIMEOUT = functest_yaml.get("vping").get("ping_timeout")
+TEST_DB = functest_yaml.get("results").get("test_db_url")
+NAME_VM_1 = functest_yaml.get("vping").get("vm_name_1")
+NAME_VM_2 = functest_yaml.get("vping").get("vm_name_2")
+IP_1 = functest_yaml.get("vping").get("ip_1")
+IP_2 = functest_yaml.get("vping").get("ip_2")
+# GLANCE_IMAGE_NAME = functest_yaml.get("general"). \
+# get("openstack").get("image_name")
+GLANCE_IMAGE_NAME = "functest-vping"
+GLANCE_IMAGE_FILENAME = functest_yaml.get("general"). \
+ get("openstack").get("image_file_name")
+GLANCE_IMAGE_FORMAT = functest_yaml.get("general"). \
+ get("openstack").get("image_disk_format")
+GLANCE_IMAGE_PATH = functest_yaml.get("general"). \
+ get("directories").get("dir_functest_data") + "/" + GLANCE_IMAGE_FILENAME
+
+
+FLAVOR = functest_yaml.get("vping").get("vm_flavor")
+
+# NEUTRON Private Network parameters
+
+NEUTRON_PRIVATE_NET_NAME = functest_yaml.get("vping"). \
+ get("vping_private_net_name")
+
+NEUTRON_PRIVATE_SUBNET_NAME = functest_yaml.get("vping"). \
+ get("vping_private_subnet_name")
+
+NEUTRON_PRIVATE_SUBNET_CIDR = functest_yaml.get("vping"). \
+ get("vping_private_subnet_cidr")
+
+NEUTRON_ROUTER_NAME = functest_yaml.get("vping"). \
+ get("vping_router_name")
def pMsg(value):
+
"""pretty printing"""
pp.pprint(value)
-def print_title(title):
- """Print titles"""
- print "\n"+"#"*40+"\n# "+title+"\n"+"#"*40+"\n"
-
-def get_credentials(service):
- """Returns a creds dictionary filled with the following keys:
- * username
- * password/api_key (depending on the service)
- * tenant_name/project_id (depending on the service)
- * auth_url
- :param service: a string indicating the name of the service
- requesting the credentials.
- """
- #TODO: get credentials from the openrc file
- creds = {}
- # Unfortunately, each of the OpenStack client will request slightly
- # different entries in their credentials dict.
- if service.lower() in ("nova", "cinder"):
- password = "api_key"
- tenant = "project_id"
- else:
- password = "password"
- tenant = "tenant_name"
- # The most common way to pass these info to the script is to do it through
- # environment variables.
- creds.update({
- "username": os.environ.get('OS_USERNAME', "admin"), # add your cloud username details
- password: os.environ.get("OS_PASSWORD", 'admin'), # add password
- "auth_url": os.environ.get("OS_AUTH_URL","http://192.168.20.71:5000/v2.0"), # Auth URL
- tenant: os.environ.get("OS_TENANT_NAME", "admin"),
- })
+def waitVmActive(nova, vm):
- return creds
+ # sleep and wait for VM status change
+ sleep_time = 3
+ count = VM_BOOT_TIMEOUT / sleep_time
+ while True:
+ status = functest_utils.get_instance_status(nova, vm)
+ logger.debug("Status: %s" % status)
+ if status == "ACTIVE":
+ return True
+ if status == "ERROR" or status == "error":
+ return False
+ if count == 0:
+ logger.debug("Booting a VM timed out...")
+ return False
+ count -= 1
+ time.sleep(sleep_time)
+ return False
+
+
+def waitVmDeleted(nova, vm):
+
+ # sleep and wait for VM status change
+ sleep_time = 3
+ count = VM_DELETE_TIMEOUT / sleep_time
+ while True:
+ status = functest_utils.get_instance_status(nova, vm)
+ if not status:
+ return True
+ elif count == 0:
+ logger.debug("Timeout")
+ return False
+ else:
+ # return False
+ count -= 1
+ time.sleep(sleep_time)
+ return False
-def get_server(creds, servername):
- nova = novaclient.Client(**creds)
- return nova.servers.find(name=servername)
+def create_private_neutron_net(neutron):
+ neutron.format = 'json'
+ logger.info('Creating neutron network %s...' % NEUTRON_PRIVATE_NET_NAME)
+ network_id = functest_utils. \
+ create_neutron_net(neutron, NEUTRON_PRIVATE_NET_NAME)
-def waitVmActive(nova,vm):
- # sleep and wait for VM status change
- while get_status(nova,vm) != "ACTIVE":
- time.sleep(3)
- logger.debug("Status: %s" % vm.status)
- logger.debug("Status: %s" % vm.status)
+ if not network_id:
+ return False
+ logger.debug("Network '%s' created successfully" % network_id)
+ logger.debug('Creating Subnet....')
+ subnet_id = functest_utils. \
+ create_neutron_subnet(neutron,
+ NEUTRON_PRIVATE_SUBNET_NAME,
+ NEUTRON_PRIVATE_SUBNET_CIDR,
+ network_id)
+ if not subnet_id:
+ return False
+ logger.debug("Subnet '%s' created successfully" % subnet_id)
+ logger.debug('Creating Router...')
+ router_id = functest_utils. \
+ create_neutron_router(neutron, NEUTRON_ROUTER_NAME)
-def get_status(nova,vm):
- vm = nova.servers.get(vm.id)
- return vm.status
+ if not router_id:
+ return False
+
+ logger.debug("Router '%s' created successfully" % router_id)
+ logger.debug('Adding router to subnet...')
+
+ result = functest_utils.add_interface_router(neutron, router_id, subnet_id)
+
+ if not result:
+ return False
+
+ logger.debug("Interface added successfully.")
+ network_dic = {'net_id': network_id,
+ 'subnet_id': subnet_id,
+ 'router_id': router_id}
+ return network_dic
+
+
+def cleanup(nova, neutron, image_id, network_dic, port_id1, port_id2=None):
+
+ # delete both VMs
+ logger.info("Cleaning up...")
+ logger.debug("Deleting image...")
+ if not functest_utils.delete_glance_image(nova, image_id):
+ logger.error("Error deleting the glance image")
+
+ vm1 = functest_utils.get_instance_by_name(nova, NAME_VM_1)
+ if vm1:
+ logger.debug("Deleting '%s'..." % NAME_VM_1)
+ nova.servers.delete(vm1)
+ # wait until VMs are deleted
+ if not waitVmDeleted(nova, vm1):
+            logger.error(
+                "Instance '%s' cannot be deleted. Status is '%s'" % (
+                    NAME_VM_1, functest_utils.get_instance_status(nova, vm1)))
+ else:
+ logger.debug("Instance %s terminated." % NAME_VM_1)
+
+ vm2 = functest_utils.get_instance_by_name(nova, NAME_VM_2)
+
+ if vm2:
+ logger.debug("Deleting '%s'..." % NAME_VM_2)
+        nova.servers.delete(vm2)
+
+        if not waitVmDeleted(nova, vm2):
+            logger.error(
+                "Instance '%s' cannot be deleted. Status is '%s'" % (
+                    NAME_VM_2, functest_utils.get_instance_status(nova, vm2)))
+ else:
+ logger.debug("Instance %s terminated." % NAME_VM_2)
+
+ # delete created network
+ logger.info("Deleting network '%s'..." % NEUTRON_PRIVATE_NET_NAME)
+ net_id = network_dic["net_id"]
+ subnet_id = network_dic["subnet_id"]
+ router_id = network_dic["router_id"]
+
+ if not functest_utils.delete_neutron_port(neutron, port_id1):
+ logger.error("Unable to remove port '%s'" % port_id1)
+ return False
+ logger.debug("Port '%s' removed successfully" % port_id1)
+
+    if port_id2 is not None:
+        if not functest_utils.delete_neutron_port(neutron, port_id2):
+            logger.error("Unable to remove port '%s'" % port_id2)
+            return False
+        logger.debug("Port '%s' removed successfully" % port_id2)
+
+ if not functest_utils.remove_interface_router(neutron, router_id,
+ subnet_id):
+ logger.error("Unable to remove subnet '%s' from router '%s'" % (
+ subnet_id, router_id))
+ return False
+
+ logger.debug("Interface removed successfully")
+ if not functest_utils.delete_neutron_router(neutron, router_id):
+ logger.error("Unable to delete router '%s'" % router_id)
+ return False
+
+ logger.debug("Router deleted successfully")
+
+ if not functest_utils.delete_neutron_subnet(neutron, subnet_id):
+ logger.error("Unable to delete subnet '%s'" % subnet_id)
+ return False
+
+ logger.debug(
+ "Subnet '%s' deleted successfully" % NEUTRON_PRIVATE_SUBNET_NAME)
+
+ if not functest_utils.delete_neutron_net(neutron, net_id):
+ logger.error("Unable to delete network '%s'" % net_id)
+ return False
+
+ logger.debug(
+ "Network '%s' deleted successfully" % NEUTRON_PRIVATE_NET_NAME)
+
+ return True
def main():
- creds = get_credentials("nova")
- nova = novaclient.Client(**creds)
- cinder = cinderclient.Client(**creds)
-
- """
- # print images and server resources
- # print nova_images
- print_title("images list")
- pMsg(nova.images.list())
- print_title("servers list")
- pMsg(nova.servers.list())
- """
- # Check if the given image is created
- images=nova.images.list()
- image_found = False
- for image in images:
- if image.name == GLANCE_IMAGE_NAME:
- logger.info("Glance image found '%s'" %image.name)
- image_found = True
- if not image_found:
- logger.error("ERROR: Glance image %s not found." % GLANCE_IMAGE_NAME)
+
+ creds_nova = functest_utils.get_credentials("nova")
+    nova_client = novaclient.Client('2', **creds_nova)
+ creds_neutron = functest_utils.get_credentials("neutron")
+ neutron_client = neutronclient.Client(**creds_neutron)
+ creds_keystone = functest_utils.get_credentials("keystone")
+ keystone_client = keystoneclient.Client(**creds_keystone)
+    glance_endpoint = keystone_client.service_catalog.url_for(
+        service_type='image', endpoint_type='publicURL')
+ glance_client = glanceclient.Client(1, glance_endpoint,
+ token=keystone_client.auth_token)
+ EXIT_CODE = -1
+
+ image = None
+ flavor = None
+
+ logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
+ GLANCE_IMAGE_PATH))
+    image_id = functest_utils.create_glance_image(glance_client,
+                                                  GLANCE_IMAGE_NAME,
+                                                  GLANCE_IMAGE_PATH)
+ if not image_id:
+ logger.error("Failed to create a Glance image...")
+ exit(-1)
+
+ # Check if the given image exists
+ image = functest_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
+ if image == '':
+ logger.error("ERROR: Glance image '%s' not found." % GLANCE_IMAGE_NAME)
logger.info("Available images are: ")
- pMsg(nova.images.list())
+ pMsg(nova_client.images.list())
+ exit(-1)
+
+ network_dic = create_private_neutron_net(neutron_client)
+
+ if not network_dic:
+ logger.error(
+ "There has been a problem when creating the neutron network")
exit(-1)
- # Check if the given neutron network is created
- networks=nova.networks.list()
- network_found = False
- for net in networks:
- if net.human_id == NEUTRON_NET_NAME:
- logger.info("Network found '%s'" %net.human_id)
- network_found = True
- if not network_found:
- logger.error("Neutron network %s not found." % NEUTRON_NET_NAME)
- logger.info("Available networks are: ")
- pMsg(nova.networks.list())
+ network_id = network_dic["net_id"]
+
+ # Check if the given flavor exists
+
+ try:
+ flavor = nova_client.flavors.find(name=FLAVOR)
+ logger.info("Flavor found '%s'" % FLAVOR)
+    except Exception:
+        logger.error("Flavor '%s' not found." % FLAVOR)
+        logger.info("Available flavors are: ")
+        pMsg(nova_client.flavors.list())
exit(-1)
- servers=nova.servers.list()
+ # Deleting instances if they exist
+
+ servers = nova_client.servers.list()
for server in servers:
if server.name == NAME_VM_1 or server.name == NAME_VM_2:
- logger.info("Instance %s found. Deleting..." %server.name)
+ logger.info("Instance %s found. Deleting..." % server.name)
server.delete()
-
-
# boot VM 1
# basic boot
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
- m = NAME_VM_1
- f = nova.flavors.find(name = FLAVOR)
- i = nova.images.find(name = GLANCE_IMAGE_NAME)
- n = nova.networks.find(label = NEUTRON_NET_NAME)
- u = "#cloud-config\npassword: opnfv\nchpasswd: { expire: False }\nssh_pwauth: True"
- #k = "demo-key"
+ # tune (e.g. flavor, images, network) to your specific
+ # openstack configuration here
+ # we consider start time at VM1 booting
+ start_time_ts = time.time()
+ end_time_ts = start_time_ts
+ logger.info("vPing Start Time:'%s'" % (
+ datetime.datetime.fromtimestamp(start_time_ts).strftime(
+ '%Y-%m-%d %H:%M:%S')))
# create VM
- logger.info("Creating instance '%s'..." %m)
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n userdata= \n%s" %(m,f,i,n,u))
- vm1 = nova.servers.create(
- name = m,
- flavor = f,
- image = i,
- nics = [{"net-id": n.id}],
- #key_name = k,
- userdata = u,
- )
+ logger.debug("Creating port 'vping-port-1' with IP %s..." % IP_1)
+ port_id1 = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-1", network_id,
+ IP_1)
+ if not port_id1:
+ logger.error("Unable to create port.")
+ exit(-1)
- #pMsg(vm1)
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_1, IP_1))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
+ "network=%s \n" % (NAME_VM_1, flavor, image, network_id))
+ vm1 = nova_client.servers.create(
+ name=NAME_VM_1,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_1}]
+ nics=[{"port-id": port_id1}]
+ )
+ # wait until VM status is active
+ if not waitVmActive(nova_client, vm1):
- #wait until VM status is active
- waitVmActive(nova,vm1)
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_1, functest_utils.get_instance_status(nova_client, vm1)))
+ cleanup(nova_client, neutron_client, image_id, network_dic, port_id1)
+        return EXIT_CODE
+ else:
+ logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)
- #retrieve IP of first VM
- logger.debug("Fetching IP...")
- server = get_server(creds, m)
- #pMsg(server.networks)
- # theoretically there is only one IP address so we take the first element of the table
+ # Retrieve IP of first VM
+ # logger.debug("Fetching IP...")
+ # server = functest_utils.get_instance_by_name(nova_client, NAME_VM_1)
+ # theoretically there is only one IP address so we take the
+ # first element of the table
# Dangerous! To be improved!
- test_ip = server.networks.get(NEUTRON_NET_NAME)[0]
- logger.debug("Instance '%s' got %s" %(m,test_ip))
- test_cmd = '/tmp/vping.sh %s'%test_ip
-
+ # test_ip = server.networks.get(NEUTRON_PRIVATE_NET_NAME)[0]
+ test_ip = IP_1
+ logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))
# boot VM 2
# we will boot then execute a ping script with cloud-init
# the long chain corresponds to the ping procedure converted with base 64
- # tune (e.g. flavor, images, network) to your specific openstack configuration here
- m = NAME_VM_2
- f = nova.flavors.find(name = FLAVOR)
- i = nova.images.find(name = GLANCE_IMAGE_NAME)
- n = nova.networks.find(label = NEUTRON_NET_NAME)
- # use base 64 format becaus bad surprises with sh script with cloud-init but script is just pinging
- #k = "demo-key"
- u = "#cloud-config\npassword: opnfv\nchpasswd: { expire: False }\nssh_pwauth: True\nwrite_files:\n- encoding: b64\n path: /tmp/vping.sh\n permissions: '0777'\n owner: root:root\n content: IyEvYmluL2Jhc2gKCndoaWxlIHRydWU7IGRvCiBwaW5nIC1jIDEgJDEgMj4mMSA+L2Rldi9udWxsCiBSRVM9JD8KIGlmIFsgIlokUkVTIiA9ICJaMCIgXSA7IHRoZW4KICBlY2hvICJ2UGluZyBPSyIKICBzbGVlcCAxMAogIHN1ZG8gc2h1dGRvd24gLWggbm93CiAgYnJlYWsKIGVsc2UKICBlY2hvICJ2UGluZyBLTyIKIGZpCiBzbGVlcCAxCmRvbmUK\nruncmd:\n - [ sh, -c, %s]"%test_cmd
+ # tune (e.g. flavor, images, network) to your specific openstack
+ # configuration here
+ u = "#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n " \
+ "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n echo 'vPing OK'\n " \
+ "break\n else\n echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip
+
# create VM
- logger.info("Creating instance '%s'..." %m)
- logger.debug("Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s \n userdata= \n%s" %(m,f,i,n,u))
- vm2 = nova.servers.create(
- name = m,
- flavor = f,
- image = i,
- nics = [{"net-id": n.id}],
- #key_name = k,
- userdata = u,
- #security_groups = s,
- #config_drive = v.id
- )
- # The injected script will shutdown the VM2 when the ping works
- # The console-log method is more consistent but doesn't work yet
+ logger.debug("Creating port 'vping-port-2' with IP %s..." % IP_2)
+ port_id2 = functest_utils.create_neutron_port(neutron_client,
+ "vping-port-2", network_id,
+ IP_2)
- waitVmActive(nova,vm2)
+ if not port_id2:
+ logger.error("Unable to create port.")
+ exit(-1)
+ logger.info("Creating instance '%s' with IP %s..." % (NAME_VM_2, IP_2))
+ logger.debug(
+ "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
+ "\n userdata= \n%s" % (
+ NAME_VM_2, flavor, image, network_id, u))
+ vm2 = nova_client.servers.create(
+ name=NAME_VM_2,
+ flavor=flavor,
+ image=image,
+ # nics = [{"net-id": network_id, "v4-fixed-ip": IP_2}],
+ nics=[{"port-id": port_id2}],
+ userdata=u
+ )
- logger.info("Waiting for ping, timeout is %d sec..." % PING_TIMEOUT)
- sec = 0
- while True:
- status = get_status(nova, vm2)
- #print status
- if status == "SHUTOFF" :
- EXIT_CODE = 0
- logger.info("vPing SUCCESSFUL after %d sec" % sec)
- break
- if sec == PING_TIMEOUT:
- logger.info("Timeout. vPing UNSUCCESSFUL.")
- break
- time.sleep(1)
- sec+=1
+ if not waitVmActive(nova_client, vm2):
+ logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
+ NAME_VM_2, functest_utils.get_instance_status(nova_client, vm2)))
+        cleanup(nova_client, neutron_client, image_id, network_dic,
+                port_id1, port_id2)
+        return EXIT_CODE
+ else:
+ logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)
- """
- # I leave this here until we fix the console-log output
+ logger.info("Waiting for ping...")
sec = 0
console_log = vm2.get_console_output()
- while not ("vPing" in console_log):
+
+ while True:
time.sleep(1)
console_log = vm2.get_console_output()
- print "--"+console_log
-
+ # print "--"+console_log
# report if the test is failed
- if "vPing" in console_log:
- pMsg("vPing is OK")
+ if "vPing OK" in console_log:
+ logger.info("vPing detected!")
+
+            # duration is measured from VM1 boot to ping detection
+ end_time_ts = time.time()
+ duration = round(end_time_ts - start_time_ts, 1)
+ logger.info("vPing duration:'%s'" % duration)
+ EXIT_CODE = 0
break
- else:
- pMsg("no vPing detected....")
- sec+=1
- if sec == PING_TIMEOUT:
+ elif sec == PING_TIMEOUT:
+ logger.info("Timeout reached.")
break
- """
+ else:
+ logger.debug("No vPing detected...")
+ sec += 1
- # delete both VMs
- logger.debug("Deleting Instances...")
- nova.servers.delete(vm1)
- logger.debug("Instance %s terminated." % NAME_VM_1)
- nova.servers.delete(vm2)
- logger.debug("Instance %s terminated." % NAME_VM_2)
+    cleanup(nova_client, neutron_client, image_id, network_dic,
+            port_id1, port_id2)
+ test_status = "NOK"
+ if EXIT_CODE == 0:
+ logger.info("vPing OK")
+ test_status = "OK"
+ else:
+ logger.error("vPing FAILED")
+
+ try:
+ if args.report:
+ logger.debug("Push result into DB")
+ # TODO check path result for the file
+ git_version = functest_utils.get_git_branch(REPO_PATH)
+ pod_name = functest_utils.get_pod_name(logger)
+ functest_utils.push_results_to_db(TEST_DB,
+ "vPing",
+ logger, pod_name, git_version,
+ payload={'timestart': start_time_ts,
+ 'duration': duration,
+ 'status': test_status})
+ # with open("vPing-result.json", "w") as outfile:
+ # json.dump({'timestart': start_time_ts, 'duration': duration,
+ # 'status': test_status}, outfile, indent=4)
+    except Exception:
+        logger.error("Error pushing results into the database")
- logger.debug("EXIT_CODE=%s" % EXIT_CODE)
exit(EXIT_CODE)
-
if __name__ == '__main__':
main()
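
Note on the new "--report" path: the body of functest_utils.push_results_to_db() is not part of
this diff. The sketch below only illustrates the rough shape of such a publisher, assuming the
results database referenced by TEST_DB accepts a JSON document over HTTP POST. The endpoint
layout, the wrapper field names (case_name, pod_name, version, details) and the use of the
requests library are illustrative assumptions, not the actual functest implementation; only the
payload keys (timestart, duration, status) come from the call in main() above.

    # Illustrative sketch only -- NOT the functest_utils implementation.
    import json
    import requests  # assumed to be available in the test environment

    def push_vping_result_sketch(db_url, pod_name, version, payload):
        """POST a vPing result document to a results database (sketch)."""
        doc = {
            "case_name": "vPing",   # hypothetical field name
            "pod_name": pod_name,   # hypothetical field name
            "version": version,     # hypothetical field name
            "details": payload,     # {'timestart': ..., 'duration': ..., 'status': ...}
        }
        try:
            resp = requests.post(db_url, data=json.dumps(doc),
                                 headers={"Content-Type": "application/json"})
            resp.raise_for_status()
            return True
        except requests.RequestException as err:
            print("Could not push vPing results: %s" % err)
            return False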