-rw-r--r-- | jjb/doctor/doctor.yml | 6
-rwxr-xr-x | jjb/functest/set-functest-env.sh | 34
-rw-r--r-- | jjb/global/releng-macros.yml | 2
-rw-r--r-- | jjb/infra/bifrost-verify-jobs.yml | 2
-rw-r--r-- | modules/opnfv/deployment/apex/adapter.py | 43
-rw-r--r-- | modules/opnfv/deployment/example.py | 15
-rw-r--r-- | modules/opnfv/deployment/fuel/adapter.py | 172
-rw-r--r-- | modules/opnfv/deployment/manager.py | 70
-rw-r--r-- | modules/opnfv/utils/ovs_logger.py | 2
-rwxr-xr-x | utils/test/reporting/functest/reporting-status.py | 6
-rw-r--r-- | utils/test/reporting/functest/testCase.py | 6
-rw-r--r-- | utils/test/reporting/reporting.yaml | 10
-rw-r--r-- | utils/test/reporting/utils/reporting_utils.py | 3
-rw-r--r-- | utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py | 11
-rw-r--r-- | utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py | 35
15 files changed, 263 insertions(+), 154 deletions(-)
diff --git a/jjb/doctor/doctor.yml b/jjb/doctor/doctor.yml
index 2333fca14..28888d673 100644
--- a/jjb/doctor/doctor.yml
+++ b/jjb/doctor/doctor.yml
@@ -22,9 +22,9 @@
         - fuel:
             slave-label: 'ool-virtual2'
             pod: 'ool-virtual2'
-        - joid:
-            slave-label: 'ool-virtual3'
-            pod: 'ool-virtual3'
+        #- joid:
+        #    slave-label: 'ool-virtual3'
+        #    pod: 'ool-virtual3'
 
     inspector:
         - 'sample'
diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh
index abec480dc..05e3d5792 100755
--- a/jjb/functest/set-functest-env.sh
+++ b/jjb/functest/set-functest-env.sh
@@ -17,32 +17,34 @@
 if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then
     echo "Credentials file detected: ${RC_FILE_PATH}"
     # volume if credentials file path is given to Functest
     rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds"
+    RC_FLAG=1
 fi
 
 
 if [[ ${INSTALLER_TYPE} == 'apex' ]]; then
     ssh_options="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
-    if sudo virsh list | grep instack; then
-        instack_mac=$(sudo virsh domiflist instack | grep default | \
-                      grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
-    elif sudo virsh list | grep undercloud; then
-        instack_mac=$(sudo virsh domiflist undercloud | grep default | \
+    if sudo virsh list | grep undercloud; then
+        echo "Installer VM detected"
+        undercloud_mac=$(sudo virsh domiflist undercloud | grep default | \
                       grep -Eo "[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+")
+        INSTALLER_IP=$(/usr/sbin/arp -e | grep ${undercloud_mac} | awk {'print $1'})
+        sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
+        sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
+        stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
+
+        if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+            sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
+        fi
+        if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
+            sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
+        fi
+    elif [[ "$RC_FLAG" == 1 ]]; then
+        echo "No available installer VM, but credentials provided...continuing"
     else
-        echo "No available installer VM exists...exiting"
+        echo "No available installer VM exists and no credentials provided...exiting"
         exit 1
     fi
 
-    INSTALLER_IP=$(/usr/sbin/arp -e | grep ${instack_mac} | awk {'print $1'})
-    sshkey_vol="-v /root/.ssh/id_rsa:/root/.ssh/id_rsa"
-    sudo scp $ssh_options root@${INSTALLER_IP}:/home/stack/stackrc ${HOME}/stackrc
-    stackrc_vol="-v ${HOME}/stackrc:/home/opnfv/functest/conf/stackrc"
 
-    if sudo iptables -C FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-        sudo iptables -D FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable
-    fi
-    if sudo iptables -C FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable 2> ${redirect}; then
-        sudo iptables -D FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable
-    fi
 fi
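Reviewer note, not part of the patch: the reworked apex branch above finds the installer VM's MAC on the libvirt 'default' network and then resolves INSTALLER_IP from the jump host's ARP table. A rough Python rendering of that lookup is sketched below; the command invocations and output formats are assumptions for illustration only.

# Illustrative sketch only (not from the patch): resolve the undercloud VM's
# IP the same way the shell block does, by reading its MAC from libvirt and
# looking that MAC up in the ARP table of the jump host.
import re
import subprocess

MAC_RE = re.compile(r'([0-9a-f]{2}:){5}[0-9a-f]{2}')


def undercloud_ip():
    # MAC of the undercloud VM interface attached to the 'default' network
    domiflist = subprocess.check_output(
        ['sudo', 'virsh', 'domiflist', 'undercloud']).decode()
    mac = None
    for line in domiflist.splitlines():
        if 'default' in line:
            match = MAC_RE.search(line.lower())
            if match:
                mac = match.group(0)
                break
    if mac is None:
        return None

    # The ARP table maps that MAC back to the address the jump host sees
    arp_table = subprocess.check_output(['/usr/sbin/arp', '-e']).decode()
    for line in arp_table.splitlines():
        if mac in line.lower():
            return line.split()[0]   # first column is the IP (or hostname)
    return None


if __name__ == '__main__':
    print(undercloud_ip())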
diff --git a/jjb/global/releng-macros.yml b/jjb/global/releng-macros.yml
index 9b09e315f..19002fe8c 100644
--- a/jjb/global/releng-macros.yml
+++ b/jjb/global/releng-macros.yml
@@ -426,7 +426,7 @@
     name: clean-workspace-log
     builders:
         - shell: |
-            find $WORKSPACE -type f -print -name '*.log' | xargs rm -f
+            find $WORKSPACE -type f -name '*.log' | xargs rm -f
 
 - publisher:
     name: archive-artifacts
diff --git a/jjb/infra/bifrost-verify-jobs.yml b/jjb/infra/bifrost-verify-jobs.yml
index c99023edf..d595d4bef 100644
--- a/jjb/infra/bifrost-verify-jobs.yml
+++ b/jjb/infra/bifrost-verify-jobs.yml
@@ -147,7 +147,7 @@
     publishers:
         - email:
-            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com zhang.jun3g@zte.com.cn
+            recipients: fatih.degirmenci@ericsson.com yroblamo@redhat.com mchandras@suse.de jack.morgan@intel.com julienjut@gmail.com
 
 #--------------------------------
 # trigger macros
 #--------------------------------
diff --git a/modules/opnfv/deployment/apex/adapter.py b/modules/opnfv/deployment/apex/adapter.py
index 1b81e781b..225e17438 100644
--- a/modules/opnfv/deployment/apex/adapter.py
+++ b/modules/opnfv/deployment/apex/adapter.py
@@ -25,9 +25,9 @@ class ApexAdapter(manager.DeploymentHandler):
                          installer_pwd=None,
                          pkey_file=pkey_file)
 
-    def nodes(self):
+    def get_nodes(self):
         nodes = []
-        cmd = "source /home/stack/stackrc;nova list 2>/dev/null"
+        cmd = "source /home/stack/stackrc;openstack server list"
         output = self.installer_node.run_cmd(cmd)
         lines = output.rsplit('\n')
         if len(lines) < 4:
@@ -35,28 +35,34 @@ class ApexAdapter(manager.DeploymentHandler):
             return None
 
         for line in lines:
-            if 'controller' in line:
-                roles = "controller"
-            elif 'compute' in line:
-                roles = "compute"
-            else:
+            roles = []
+            if any(x in line for x in ['-----', 'Networks']):
                 continue
-            if 'Daylight' in line:
-                roles += ", OpenDaylight"
+            if 'controller' in line:
+                roles.append(manager.Role.CONTROLLER)
+            if 'compute' in line:
+                roles.append(manager.Role.COMPUTE)
+            if 'opendaylight' in line.lower():
+                roles.append(manager.Role.ODL)
+
             fields = line.split('|')
-            id = re.sub('[!| ]', '', fields[1])
-            name = re.sub('[!| ]', '', fields[2])
-            status_node = re.sub('[!| ]', '', fields[3])
-            ip = re.sub('[!| ctlplane=]', '', fields[6])
+            id = re.sub('[!| ]', '', fields[1]).encode()
+            name = re.sub('[!| ]', '', fields[2]).encode()
+            status_node = re.sub('[!| ]', '', fields[3]).encode().lower()
+            ip = re.sub('[!| ctlplane=]', '', fields[4]).encode()
 
-            if status_node == 'ACTIVE':
-                status = manager.Node.STATUS_OK
+            ssh_client = None
+            if 'active' in status_node:
+                status = manager.NodeStatus.STATUS_OK
                 ssh_client = ssh_utils.get_ssh_client(hostname=ip,
                                                       username='heat-admin',
                                                       pkey_file=self.pkey_file)
+            elif 'error' in status_node:
+                status = manager.NodeStatus.STATUS_ERROR
+            elif 'off' in status_node:
+                status = manager.NodeStatus.STATUS_OFFLINE
             else:
-                status = manager.Node.STATUS_INACTIVE
-                ssh_client = None
+                status = manager.NodeStatus.STATUS_INACTIVE
 
             node = manager.Node(id, ip, name, status, roles, ssh_client)
             nodes.append(node)
@@ -73,8 +79,9 @@ class ApexAdapter(manager.DeploymentHandler):
                      "grep Description|sed 's/^.*\: //'")
         cmd_ver = ("sudo yum info opendaylight 2>/dev/null|"
                    "grep Version|sed 's/^.*\: //'")
+        description = None
         for node in self.nodes:
-            if 'controller' in node.get_attribute('roles'):
+            if node.is_controller():
                 description = node.run_cmd(cmd_descr)
                 version = node.run_cmd(cmd_ver)
                 break
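For orientation, the new ApexAdapter.get_nodes() above is essentially a row parser over `openstack server list`. The sketch below replays that per-row logic on an invented sample row; the Role and NodeStatus classes here are local stand-ins for illustration, not the ones from opnfv.deployment.manager.

# Standalone sketch of the per-row parsing in the new get_nodes(); the sample
# row is invented and Role/NodeStatus below are stand-ins for illustration.
import re


class Role(object):
    CONTROLLER = 'controller'
    COMPUTE = 'compute'
    ODL = 'opendaylight'


class NodeStatus(object):
    STATUS_OK = 'active'
    STATUS_ERROR = 'error'
    STATUS_OFFLINE = 'offline'
    STATUS_INACTIVE = 'inactive'


def parse_server_line(line):
    # Skip table borders and the header row, as the adapter does
    if any(x in line for x in ['-----', 'Networks']):
        return None

    roles = []
    if 'controller' in line:
        roles.append(Role.CONTROLLER)
    if 'compute' in line:
        roles.append(Role.COMPUTE)
    if 'opendaylight' in line.lower():
        roles.append(Role.ODL)

    fields = line.split('|')
    node_id = re.sub('[!| ]', '', fields[1])
    name = re.sub('[!| ]', '', fields[2])
    status_raw = re.sub('[!| ]', '', fields[3]).lower()
    ip = re.sub('[!| ctlplane=]', '', fields[4])

    if 'active' in status_raw:
        status = NodeStatus.STATUS_OK
    elif 'error' in status_raw:
        status = NodeStatus.STATUS_ERROR
    elif 'off' in status_raw:
        status = NodeStatus.STATUS_OFFLINE
    else:
        status = NodeStatus.STATUS_INACTIVE
    return node_id, name, ip, status, roles


sample = "| 9af0a5d5 | overcloud-controller-0 | ACTIVE | ctlplane=192.0.2.8 |"
print(parse_server_line(sample))
# ('9af0a5d5', 'overcloud-controller-0', '192.0.2.8', 'active', ['controller'])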
##########") +handler = factory.Factory.get_handler('fuel', + '10.20.0.2', + 'root', + installer_pwd='r00tme') + +print(handler.get_deployment_info()) + +print("List of nodes in cluster 4:") +nodes = handler.get_nodes({'cluster': '4'}) +for node in nodes: + print(node) diff --git a/modules/opnfv/deployment/fuel/adapter.py b/modules/opnfv/deployment/fuel/adapter.py index d53966e82..aa5ad7a44 100644 --- a/modules/opnfv/deployment/fuel/adapter.py +++ b/modules/opnfv/deployment/fuel/adapter.py @@ -13,7 +13,7 @@ from opnfv.deployment import manager from opnfv.utils import opnfv_logger as logger from opnfv.utils import ssh_utils -logger = logger.Logger("FuelAdapter").getLogger() +logger = logger.Logger(__name__).getLogger() class FuelAdapter(manager.DeploymentHandler): @@ -40,7 +40,7 @@ class FuelAdapter(manager.DeploymentHandler): index_name = -1 index_release_id = -1 - for i in range(len(fields) - 1): + for i in range(len(fields)): if "id" in fields[i]: index_id = i elif "status" in fields[i]: @@ -51,7 +51,7 @@ class FuelAdapter(manager.DeploymentHandler): index_release_id = i # order env info - for i in range(2, len(lines) - 1): + for i in range(2, len(lines)): fields = lines[i].rsplit(' | ') dict = {"id": fields[index_id].strip(), "status": fields[index_status].strip(), @@ -61,88 +61,116 @@ class FuelAdapter(manager.DeploymentHandler): return environments - def nodes(self, options=None): + def get_nodes(self, options=None): + + if options and options['cluster'] and len(self.nodes) > 0: + n = [] + for node in self.nodes: + if str(node.info['cluster']) == str(options['cluster']): + n.append(node) + return n + + try: + # if we have retrieved previously all the nodes, don't do it again + # This fails the first time when the constructor calls this method + # therefore the try/except + if len(self.nodes) > 0: + return self.nodes + except: + pass + nodes = [] cmd = 'fuel node' output = self.installer_node.run_cmd(cmd) lines = output.rsplit('\n') if len(lines) < 2: logger.info("No nodes found in the deployment.") - return None - else: - # get fields indexes - fields = lines[0].rsplit(' | ') - - index_id = -1 - index_status = -1 - index_name = -1 - index_cluster = -1 - index_ip = -1 - index_mac = -1 - index_roles = -1 - index_online = -1 - - for i in range(0, len(fields) - 1): - if "id" in fields[i]: - index_id = i - elif "status" in fields[i]: - index_status = i - elif "name" in fields[i]: - index_name = i - elif "cluster" in fields[i]: - index_cluster = i - elif "ip" in fields[i]: - index_ip = i - elif "mac" in fields[i]: - index_mac = i - elif "roles " in fields[i]: - index_roles = i - elif "online" in fields[i]: - index_online = i - - # order nodes info - for i in range(2, len(lines) - 1): - fields = lines[i].rsplit(' | ') - - id = fields[index_id].strip(), - ip = fields[index_ip].strip() - status_node = fields[index_status].strip() - name = fields[index_name].strip() - roles = fields[index_roles].strip() - - dict = {"cluster": fields[index_cluster].strip(), - "mac": fields[index_mac].strip(), - "online": fields[index_online].strip()} - - if status_node == 'ready': - status = manager.Node.STATUS_OK - proxy = {'ip': self.installer_ip, - 'username': self.installer_user, - 'password': self.installer_pwd} - ssh_client = ssh_utils.get_ssh_client(hostname=ip, - username='root', - proxy=proxy) - else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None - - node = manager.Node( - id, ip, name, status, roles, ssh_client, dict) + return nodes + + # get fields indexes + fields = lines[0].rsplit(' | 
diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py
index 9f77ff0a7..43a79488b 100644
--- a/modules/opnfv/deployment/manager.py
+++ b/modules/opnfv/deployment/manager.py
@@ -89,25 +89,35 @@ class Deployment(object):
             sdn_controller=self.deployment_info['sdn_controller'])
 
         for node in self.deployment_info['nodes']:
-            s += '\t\t{node_object}\n'.format(node_object=node)
+            s += '{node_object}\n'.format(node_object=node)
 
         return s
 
 
-class Node(object):
+class Role():
+    CONTROLLER = 'controller'
+    COMPUTE = 'compute'
+    ODL = 'opendaylight'
+    ONOS = 'onos'
+
+class NodeStatus():
 
     STATUS_OK = 'active'
    STATUS_INACTIVE = 'inactive'
     STATUS_OFFLINE = 'offline'
-    STATUS_FAILED = 'failed'
+    STATUS_ERROR = 'error'
+    STATUS_UNUSED = 'unused'
+
+
+class Node(object):
 
     def __init__(self,
                  id,
                  ip,
                  name,
                  status,
-                 roles,
-                 ssh_client,
+                 roles=[],
+                 ssh_client=None,
                  info={}):
         self.id = id
         self.ip = ip
@@ -121,7 +131,7 @@ class Node(object):
         '''
         SCP file from a node
         '''
-        if self.status is not Node.STATUS_OK:
+        if self.status is not NodeStatus.STATUS_OK:
             logger.info("The node %s is not active" % self.ip)
             return 1
         logger.info("Fetching %s from %s" % (src, self.ip))
@@ -137,7 +147,7 @@ class Node(object):
         '''
         SCP file to a node
         '''
-        if self.status is not Node.STATUS_OK:
+        if self.status is not NodeStatus.STATUS_OK:
             logger.info("The node %s is not active" % self.ip)
             return 1
         logger.info("Copying %s to %s" % (src, self.ip))
@@ -153,9 +163,9 @@ class Node(object):
         '''
         Run command remotely on a node
         '''
-        if self.status is not Node.STATUS_OK:
-            logger.info("The node %s is not active" % self.ip)
-            return 1
+        if self.status is not NodeStatus.STATUS_OK:
+            logger.error("The node %s is not active" % self.ip)
+            return None
         _, stdout, stderr = (self.ssh_client.exec_command(cmd))
         error = stderr.readlines()
         if len(error) > 0:
@@ -187,7 +197,7 @@ class Node(object):
         '''
         Returns if the node is a controller
         '''
-        if 'controller' in self.get_attribute('roles'):
+        if 'controller' in self.roles:
             return True
         return False
 
@@ -195,12 +205,32 @@ class Node(object):
         '''
         Returns if the node is a compute
         '''
-        if 'compute' in self.get_attribute('roles'):
+        if 'compute' in self.roles:
             return True
         return False
 
+    def get_ovs_info(self):
+        '''
+        Returns the ovs version installed
+        '''
+        cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'"
+        return self.run_cmd(cmd)
+
     def __str__(self):
-        return str(self.get_dict())
+        return '''
+        name:    {name}
+        id:      {id}
+        ip:      {ip}
+        status:  {status}
+        roles:   {roles}
+        ovs:     {ovs}
+        info:    {info}'''.format(name=self.name,
+                                  id=self.id,
+                                  ip=self.ip,
+                                  status=self.status,
+                                  roles=self.roles,
+                                  ovs=self.get_ovs_info(),
+                                  info=self.info)
 
 
 class DeploymentHandler(object):
@@ -236,14 +266,14 @@ class DeploymentHandler(object):
             self.installer_node = Node(id='',
                                        ip=installer_ip,
                                        name=installer,
-                                       status='active',
+                                       status=NodeStatus.STATUS_OK,
                                        ssh_client=self.installer_connection,
                                        roles='installer node')
         else:
             raise Exception(
                 'Cannot establish connection to the installer node!')
 
-        self.nodes = self.nodes()
+        self.nodes = self.get_nodes()
 
     @abstractmethod
     def get_openstack_version(self):
@@ -267,18 +297,12 @@ class DeploymentHandler(object):
         raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED)
 
     @abstractmethod
-    def nodes(self, options=None):
+    def get_nodes(self, options=None):
         '''
         Generates a list of all the nodes in the deployment
         '''
         raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED)
 
-    def get_nodes(self, options=None):
-        '''
-        Returns the list of Node objects
-        '''
-        return self.nodes
-
     def get_installer_node(self):
         '''
         Returns the installer node object
@@ -296,4 +320,4 @@ class DeploymentHandler(object):
                           pod=os.getenv('NODE_NAME', 'Unknown'),
                           openstack_version=self.get_openstack_version(),
                           sdn_controller=self.get_sdn_version(),
-                          nodes=self.nodes)
+                          nodes=self.get_nodes())
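A short usage sketch of the refactored manager API above. It assumes the releng modules/ directory is on PYTHONPATH (and paramiko installed) so that opnfv.deployment.manager imports cleanly; the node values below are made up.

# Usage sketch (assumes the releng modules/ dir is on PYTHONPATH and paramiko
# is installed); the node values below are made up.
from opnfv.deployment import manager

# roles and ssh_client are optional now, so a Node can be built by hand
node = manager.Node(id='7',
                    ip='10.20.0.7',
                    name='node-7',
                    status=manager.NodeStatus.STATUS_OFFLINE,
                    roles=[manager.Role.CONTROLLER, manager.Role.ODL])

print(node.is_controller())    # True  -- 'controller' is in node.roles
print(node.is_compute())       # False
print(node.run_cmd('uptime'))  # None  -- node is not STATUS_OK, so no SSH call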
diff --git a/modules/opnfv/utils/ovs_logger.py b/modules/opnfv/utils/ovs_logger.py
index 75b4cec80..d650eb9ab 100644
--- a/modules/opnfv/utils/ovs_logger.py
+++ b/modules/opnfv/utils/ovs_logger.py
@@ -7,7 +7,7 @@
 #   http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
-import opnfv.utils.OPNFVLogger as OPNFVLogger
+import opnfv.utils.opnfv_logger as OPNFVLogger
 import os
 import time
 import shutil
diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py
index 158ee597b..df5632335 100755
--- a/utils/test/reporting/functest/reporting-status.py
+++ b/utils/test/reporting/functest/reporting-status.py
@@ -61,13 +61,13 @@ logger.info("*******************************************")
 # Retrieve test cases of Tier 1 (smoke)
 config_tiers = functest_yaml_config.get("tiers")
 
-# we consider Tier 1 (smoke),2 (features)
+# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features)
 # to validate scenarios
-# Tier > 4 are not used to validate scenarios but we display the results anyway
+# Tier > 2 are not used to validate scenarios but we display the results anyway
 # tricky thing for the API as some tests are Functest tests
 # other tests are declared directly in the feature projects
 for tier in config_tiers:
-    if tier['order'] > 0 and tier['order'] < 2:
+    if tier['order'] >= 0 and tier['order'] < 2:
         for case in tier['testcases']:
             if case['name'] not in blacklist:
                 testValid.append(tc.TestCase(case['name'],
diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py
index df0874e0b..22196c86b 100644
--- a/utils/test/reporting/functest/testCase.py
+++ b/utils/test/reporting/functest/testCase.py
@@ -43,7 +43,8 @@ class TestCase(object):
                                'parser': 'Parser',
                                'connection_check': 'Health (connection)',
                                'api_check': 'Health (api)',
-                               'snaps_smoke': 'SNAPS'}
+                               'snaps_smoke': 'SNAPS',
+                               'snaps_health_check': 'Health (dhcp)'}
         try:
             self.displayName = display_name_matrix[self.name]
         except:
@@ -138,7 +139,8 @@ class TestCase(object):
                              'parser': 'parser-basics',
                              'connection_check': 'connection_check',
                              'api_check': 'api_check',
-                             'snaps_smoke': 'snaps_smoke'
+                             'snaps_smoke': 'snaps_smoke',
+                             'snaps_health_check': 'snaps_health_check'
                              }
         try:
             return test_match_matrix[self.name]
diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml
index 9db0890b2..2fb6b7831 100644
--- a/utils/test/reporting/reporting.yaml
+++ b/utils/test/reporting/reporting.yaml
@@ -36,12 +36,20 @@ functest:
         - ovno
         - security_scan
         - rally_sanity
+        - healthcheck
+        - odl_netvirt
+        - aaa
+        - cloudify_ims
+        - orchestra_ims
+        - juju_epc
+        - orchestra
+        - promise
     max_scenario_criteria: 50
     test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml
     log_level: ERROR
     jenkins_url: https://build.opnfv.org/ci/view/functest/job/
     exclude_noha: False
-    exclude_virtual: True
+    exclude_virtual: False
 
 yardstick:
     test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml
diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py
index fc5d188af..1879fb628 100644
--- a/utils/test/reporting/utils/reporting_utils.py
+++ b/utils/test/reporting/utils/reporting_utils.py
@@ -269,7 +269,8 @@ def getJenkinsUrl(build_tag):
     url_base = get_config('functest.jenkins_url')
     try:
         build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
-        url_id = build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0])
+        url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] +
+                  "/" + str(build_id[0]))
         jenkins_url = url_base + url_id + "/console"
     except:
         print('Impossible to get jenkins url:')
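The reporting_utils.py change above fixes the Jenkins URL slicing: the old expression effectively assumed a three-digit build number, while the new one measures the digits of the actual build id. A worked comparison on an invented build tag:

# Worked comparison of the old and new slicing in getJenkinsUrl(); the build
# tag is invented. The old form only sliced correctly for 3-digit build ids.
def url_id_old(build_tag):
    build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
    return build_tag[8:-(len(build_id) + 3)] + "/" + str(build_id[0])


def url_id_new(build_tag):
    build_id = [int(s) for s in build_tag.split("-") if s.isdigit()]
    return (build_tag[8:-(len(str(build_id[0])) + 1)] +
            "/" + str(build_id[0]))


tag = "jenkins-functest-fuel-baremetal-daily-master-45"
print(url_id_old(tag))  # functest-fuel-baremetal-daily-maste/45  (truncated)
print(url_id_new(tag))  # functest-fuel-baremetal-daily-master/45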
diff --git a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
index a8c1a94fe..7c8c333a5 100644
--- a/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
+++ b/utils/test/testapi/opnfv_testapi/resources/scenario_handlers.py
@@ -116,6 +116,17 @@ class ScenarioGURHandler(GenericScenarioHandler):
         db_keys = ['name']
         self._update(query, db_keys)
 
+    @swagger.operation(nickname="deleteScenarioByName")
+    def delete(self, name):
+        """
+            @description: delete a scenario by name
+            @return 200: delete success
+            @raise 404: scenario not exist:
+        """
+
+        query = {'name': name}
+        self._delete(query)
+
     def _update_query(self, keys, data):
         query = dict()
         equal = True
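A client-side sketch of the new DELETE route above. The base URL and the /api/v1/scenarios/<name> path are assumptions inferred from the existing scenario handlers, and the `requests` library is assumed to be available; this is not part of the patch.

# Hypothetical client call for the new DELETE route; base URL and path are
# assumptions, and `requests` must be installed.
import requests

TESTAPI = 'http://localhost:8000/api/v1'    # hypothetical testapi endpoint


def delete_scenario(name):
    resp = requests.delete('{}/scenarios/{}'.format(TESTAPI, name))
    # 200: deleted, 404: scenario does not exist (as documented above)
    return resp.status_code


if __name__ == '__main__':
    print(delete_scenario('os-odl_l2-nofeature-ha'))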
diff --git a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
index c15dc32ea..3a0abf934 100644
--- a/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
+++ b/utils/test/testapi/opnfv_testapi/tests/unit/test_scenario.py
@@ -1,11 +1,9 @@
 from copy import deepcopy
+from datetime import datetime
 import json
 import os
-from datetime import datetime
 
-from opnfv_testapi.common.constants import HTTP_BAD_REQUEST
-from opnfv_testapi.common.constants import HTTP_FORBIDDEN
-from opnfv_testapi.common.constants import HTTP_OK
+from opnfv_testapi.common import constants
 import opnfv_testapi.resources.scenario_models as models
 from test_testcase import TestBase
 
@@ -38,7 +36,7 @@ class TestScenarioBase(TestBase):
         return res.href.split('/')[-1]
 
     def assert_res(self, code, scenario, req=None):
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         if req is None:
             req = self.req_d
         scenario_dict = scenario.format_http()
@@ -61,29 +59,29 @@ class TestScenarioCreate(TestScenarioBase):
 
     def test_withoutBody(self):
         (code, body) = self.create()
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
 
     def test_emptyName(self):
         req_empty = models.ScenarioCreateRequest('')
         (code, body) = self.create(req_empty)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_noneName(self):
         req_none = models.ScenarioCreateRequest(None)
         (code, body) = self.create(req_none)
-        self.assertEqual(code, HTTP_BAD_REQUEST)
+        self.assertEqual(code, constants.HTTP_BAD_REQUEST)
         self.assertIn('name missing', body)
 
     def test_success(self):
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self.assert_create_body(body)
 
     def test_alreadyExist(self):
         self.create_d()
         (code, body) = self.create_d()
-        self.assertEqual(code, HTTP_FORBIDDEN)
+        self.assertEqual(code, constants.HTTP_FORBIDDEN)
         self.assertIn('already exists', body)
 
 
@@ -126,7 +124,7 @@ class TestScenarioGet(TestScenarioBase):
     def _query_and_assert(self, query, found=True, reqs=None):
         code, body = self.query(query)
         if not found:
-            self.assertEqual(code, HTTP_OK)
+            self.assertEqual(code, constants.HTTP_OK)
             self.assertEqual(0, len(body.scenarios))
         else:
             self.assertEqual(len(reqs), len(body.scenarios))
@@ -296,10 +294,23 @@ class TestScenarioUpdate(TestScenarioBase):
 
     def _update_and_assert(self, update_req, new_scenario, name=None):
         code, _ = self.update(update_req, self.scenario)
-        self.assertEqual(code, HTTP_OK)
+        self.assertEqual(code, constants.HTTP_OK)
         self._get_and_assert(self._none_default(name, self.scenario),
                              new_scenario)
 
     @staticmethod
     def _none_default(check, default):
         return check if check else default
+
+
+class TestScenarioDelete(TestScenarioBase):
+    def test_notFound(self):
+        code, body = self.delete('notFound')
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)
+
+    def test_success(self):
+        scenario = self.create_return_name(self.req_d)
+        code, _ = self.delete(scenario)
+        self.assertEqual(code, constants.HTTP_OK)
+        code, _ = self.get(scenario)
+        self.assertEqual(code, constants.HTTP_NOT_FOUND)