36 files changed, 501 insertions, 319 deletions
diff --git a/jjb/3rd_party_ci/odl-netvirt.yml b/jjb/3rd_party_ci/odl-netvirt.yml index 7a9998433..470e4335e 100644 --- a/jjb/3rd_party_ci/odl-netvirt.yml +++ b/jjb/3rd_party_ci/odl-netvirt.yml @@ -128,7 +128,8 @@ - name: 'functest-netvirt-virtual-suite-{stream}' predefined-parameters: | DEPLOY_SCENARIO=os-odl_l3-nofeature-ha - FUNCTEST_SUITE_NAME=healthcheck + FUNCTEST_SUITE_NAME=tempest_smoke_serial + RC_FILE_PATH=$HOME/cloner-info/overcloudrc node-parameters: true kill-phase-on: FAILURE abort-all-job: false diff --git a/jjb/apex/apex-deploy.sh b/jjb/apex/apex-deploy.sh index b68225f15..dc70488e7 100755 --- a/jjb/apex/apex-deploy.sh +++ b/jjb/apex/apex-deploy.sh @@ -183,6 +183,8 @@ if [[ "$JOB_NAME" == *virtual* ]]; then # settings for virtual deployment if [ "$IPV6_FLAG" == "True" ]; then NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_v6.yaml" + elif echo ${DEPLOY_SCENARIO} | grep fdio; then + NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings_vpp.yaml" else NETWORK_FILE="${NETWORK_SETTINGS_DIR}/network_settings.yaml" fi diff --git a/jjb/apex/apex-snapshot-deploy.sh b/jjb/apex/apex-snapshot-deploy.sh index 773edd228..8274740c8 100644 --- a/jjb/apex/apex-snapshot-deploy.sh +++ b/jjb/apex/apex-snapshot-deploy.sh @@ -81,6 +81,12 @@ if [ -z "$virsh_networks" ]; then exit 1 fi +echo "Checking overcloudrc" +if ! stat overcloudrc; then + echo "ERROR: overcloudrc does not exist in snap unpack" + exit 1 +fi + for network_def in ${virsh_networks}; do sudo virsh net-create ${network_def} network=$(echo ${network_def} | awk -F '.' '{print $1}') @@ -96,9 +102,19 @@ for network_def in ${virsh_networks}; do sudo ip addr add 192.0.2.99/24 dev br-admin sudo ip link set up dev br-admin elif [ "br-${network}" == 'br-external' ]; then - echo "Configuring IP 192.168.37.99 on br-external" - sudo ip addr add 192.168.37.99/24 dev br-external + echo "Configuring IP 192.168.37.1 on br-external" + sudo ip addr add 192.168.37.1/24 dev br-external sudo ip link set up dev br-external + # Routes for admin network + # The overcloud controller is multi-homed and will fail to respond + # to traffic from the functest container due to reverse-path-filtering + # This route allows reverse traffic, by forcing admin network destined + # traffic through the external network for controller IPs only. + # Compute nodes have no ip on external interfaces. + controller_ips=$(cat overcloudrc | grep -Eo "192.0.2.[0-9]+") + for ip in $controller_ips; do + sudo ip route add ${ip}/32 dev br-external + done fi fi done @@ -120,17 +136,11 @@ for node_def in ${virsh_vm_defs}; do echo "Node: ${node} started" done -echo "Checking overcloudrc" -if ! 
stat overcloudrc; then - echo "ERROR: overcloudrc does not exist in snap unpack" - exit 1 -fi - # copy overcloudrc for functest mkdir -p $HOME/cloner-info cp -f overcloudrc $HOME/cloner-info/ -admin_controller_ip=$(cat overcloudrc | grep -Eo "192.0.2.[0-9]+") +admin_controller_ip=$(cat overcloudrc | grep -Eo -m 1 "192.0.2.[0-9]+") netvirt_url="http://${admin_controller_ip}:8081/restconf/operational/network-topology:network-topology/topology/netvirt:1" source overcloudrc diff --git a/jjb/apex/apex.yml b/jjb/apex/apex.yml index e3f0f53bc..ff9fbec14 100644 --- a/jjb/apex/apex.yml +++ b/jjb/apex/apex.yml @@ -672,6 +672,36 @@ build-step-failure-threshold: 'never' failure-threshold: 'never' unstable-threshold: 'FAILURE' + - trigger-builds: + - project: 'apex-deploy-baremetal-os-odl-gluon-noha-{stream}' + predefined-parameters: | + BUILD_DIRECTORY=apex-build-{stream}/.build + OPNFV_CLEAN=yes + git-revision: true + same-node: true + block-thresholds: + build-step-failure-threshold: 'never' + block: true + - trigger-builds: + - project: 'functest-apex-{daily-slave}-daily-{stream}' + predefined-parameters: + DEPLOY_SCENARIO=os-odl-gluon-noha + block: true + same-node: true + block-thresholds: + build-step-failure-threshold: 'never' + failure-threshold: 'never' + unstable-threshold: 'FAILURE' + - trigger-builds: + - project: 'yardstick-apex-{slave}-daily-{stream}' + predefined-parameters: + DEPLOY_SCENARIO=os-odl-gluon-noha + block: true + same-node: true + block-thresholds: + build-step-failure-threshold: 'never' + failure-threshold: 'never' + unstable-threshold: 'FAILURE' # Colorado Build - job-template: name: 'apex-build-colorado' diff --git a/jjb/copper/copper.yml b/jjb/copper/copper.yml index eff66ba29..ea1af473c 100644 --- a/jjb/copper/copper.yml +++ b/jjb/copper/copper.yml @@ -59,4 +59,10 @@ builders: - shell: | - echo "Nothing to verify!" 
+ #!/bin/bash + set -o errexit + set -o nounset + set -o pipefail + + cd $WORKSPACE/ci + shellcheck -f tty tests/*.sh diff --git a/jjb/daisy4nfv/daisy-project-jobs.yml b/jjb/daisy4nfv/daisy-project-jobs.yml index f712adb0a..156740980 100644 --- a/jjb/daisy4nfv/daisy-project-jobs.yml +++ b/jjb/daisy4nfv/daisy-project-jobs.yml @@ -182,21 +182,21 @@ name: 'daisy-build-daily-macro' builders: - shell: - !include-raw-escape: ./daisy4nfv-basic.sh + !include-raw: ./daisy4nfv-basic.sh - shell: - !include-raw-escape: ./daisy4nfv-build.sh + !include-raw: ./daisy4nfv-build.sh - shell: - !include-raw-escape: ./daisy4nfv-upload-artifact.sh + !include-raw: ./daisy4nfv-upload-artifact.sh - shell: - !include-raw-escape: ./daisy4nfv-workspace-cleanup.sh + !include-raw: ./daisy4nfv-workspace-cleanup.sh - builder: name: 'daisy-deploy-daily-macro' builders: - shell: - !include-raw-escape: ./daisy4nfv-download-artifact.sh + !include-raw: ./daisy4nfv-download-artifact.sh - shell: - !include-raw-escape: ./daisy4nfv-deploy.sh + !include-raw: ./daisy4nfv-deploy.sh - builder: name: 'daisy-test-daily-macro' diff --git a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml index febce6f82..ee82c14b2 100644 --- a/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml +++ b/jjb/daisy4nfv/daisy4nfv-verify-jobs.yml @@ -169,11 +169,11 @@ name: 'daisy-verify-build-macro' builders: - shell: - !include-raw-escape: ./daisy4nfv-basic.sh + !include-raw: ./daisy4nfv-basic.sh - shell: - !include-raw-escape: ./daisy4nfv-build.sh + !include-raw: ./daisy4nfv-build.sh - shell: - !include-raw-escape: ./daisy4nfv-workspace-cleanup.sh + !include-raw: ./daisy4nfv-workspace-cleanup.sh ##################################### # parameter macros diff --git a/jjb/functest/functest-cleanup.sh b/jjb/functest/functest-cleanup.sh index 3ef9b90dd..fc277b9ed 100755 --- a/jjb/functest/functest-cleanup.sh +++ b/jjb/functest/functest-cleanup.sh @@ -15,7 +15,10 @@ if [[ -n ${dangling_images} ]]; then echo " Removing $FUNCTEST_IMAGE:<none> images and their containers..." 
for image_id in "${dangling_images[@]}"; do echo " Removing image_id: $image_id and its containers" - docker ps -a | grep $image_id | awk '{print $1}'| xargs docker rm -f >${redirect} + containers=$(docker ps -a | grep $image_id | awk '{print $1}') + if [[ -n "$containers" ]];then + docker rm -f $containers >${redirect} + fi docker rmi $image_id >${redirect} done fi diff --git a/jjb/functest/set-functest-env.sh b/jjb/functest/set-functest-env.sh index 5224793dc..abec480dc 100755 --- a/jjb/functest/set-functest-env.sh +++ b/jjb/functest/set-functest-env.sh @@ -14,8 +14,9 @@ if [[ ${INSTALLER_TYPE} == 'joid' ]]; then fi if [[ ${RC_FILE_PATH} != '' ]] && [[ -f ${RC_FILE_PATH} ]] ; then + echo "Credentials file detected: ${RC_FILE_PATH}" # volume if credentials file path is given to Functest - rc_file_vol="-v $RC_FILE_PATH:/home/opnfv/functest/conf/openstack.creds" + rc_file_vol="-v ${RC_FILE_PATH}:/home/opnfv/functest/conf/openstack.creds" fi diff --git a/jjb/global/installer-params.yml b/jjb/global/installer-params.yml index c3e775681..fc9f34a48 100644 --- a/jjb/global/installer-params.yml +++ b/jjb/global/installer-params.yml @@ -122,7 +122,7 @@ description: 'IP of the installer' - string: name: INSTALLER_TYPE - default: netvirt + default: apex description: 'Installer used for deploying OPNFV on this POD' - string: name: EXTERNAL_NETWORK diff --git a/jjb/releng/opnfv-lint.yml b/jjb/releng/opnfv-lint.yml index 4de47e87e..37cdef28f 100644 --- a/jjb/releng/opnfv-lint.yml +++ b/jjb/releng/opnfv-lint.yml @@ -53,7 +53,7 @@ comment-contains-value: 'reverify' projects: - project-compare-type: 'REG_EXP' - project-pattern: 'functest|sdnvpn|qtip|daisy|sfc|escalator' + project-pattern: 'functest|sdnvpn|qtip|daisy|sfc|escalator|releng' branches: - branch-compare-type: 'ANT' branch-pattern: '**/{branch}' diff --git a/modules/opnfv/deployment/apex/adapter.py b/modules/opnfv/deployment/apex/adapter.py index 1b81e781b..225e17438 100644 --- a/modules/opnfv/deployment/apex/adapter.py +++ b/modules/opnfv/deployment/apex/adapter.py @@ -25,9 +25,9 @@ class ApexAdapter(manager.DeploymentHandler): installer_pwd=None, pkey_file=pkey_file) - def nodes(self): + def get_nodes(self): nodes = [] - cmd = "source /home/stack/stackrc;nova list 2>/dev/null" + cmd = "source /home/stack/stackrc;openstack server list" output = self.installer_node.run_cmd(cmd) lines = output.rsplit('\n') if len(lines) < 4: @@ -35,28 +35,34 @@ class ApexAdapter(manager.DeploymentHandler): return None for line in lines: - if 'controller' in line: - roles = "controller" - elif 'compute' in line: - roles = "compute" - else: + roles = [] + if any(x in line for x in ['-----', 'Networks']): continue - if 'Daylight' in line: - roles += ", OpenDaylight" + if 'controller' in line: + roles.append(manager.Role.CONTROLLER) + if 'compute' in line: + roles.append(manager.Role.COMPUTE) + if 'opendaylight' in line.lower(): + roles.append(manager.Role.ODL) + fields = line.split('|') - id = re.sub('[!| ]', '', fields[1]) - name = re.sub('[!| ]', '', fields[2]) - status_node = re.sub('[!| ]', '', fields[3]) - ip = re.sub('[!| ctlplane=]', '', fields[6]) + id = re.sub('[!| ]', '', fields[1]).encode() + name = re.sub('[!| ]', '', fields[2]).encode() + status_node = re.sub('[!| ]', '', fields[3]).encode().lower() + ip = re.sub('[!| ctlplane=]', '', fields[4]).encode() - if status_node == 'ACTIVE': - status = manager.Node.STATUS_OK + ssh_client = None + if 'active' in status_node: + status = manager.NodeStatus.STATUS_OK ssh_client = 
ssh_utils.get_ssh_client(hostname=ip, username='heat-admin', pkey_file=self.pkey_file) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None + status = manager.NodeStatus.STATUS_INACTIVE node = manager.Node(id, ip, name, status, roles, ssh_client) nodes.append(node) @@ -73,8 +79,9 @@ class ApexAdapter(manager.DeploymentHandler): "grep Description|sed 's/^.*\: //'") cmd_ver = ("sudo yum info opendaylight 2>/dev/null|" "grep Version|sed 's/^.*\: //'") + description = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): description = node.run_cmd(cmd_descr) version = node.run_cmd(cmd_ver) break diff --git a/modules/opnfv/deployment/example.py b/modules/opnfv/deployment/example.py index 6a76eb9c3..3999a11c6 100644 --- a/modules/opnfv/deployment/example.py +++ b/modules/opnfv/deployment/example.py @@ -3,6 +3,7 @@ from opnfv.deployment import factory +print("########## APEX ##########") handler = factory.Factory.get_handler('apex', '192.168.122.135', 'stack', @@ -18,4 +19,18 @@ for node in nodes: print("Hello, I am node '%s' and my ip is %s." % (node.run_cmd('hostname'), node.ip)) -print handler.get_deployment_info() +print(handler.get_deployment_info()) + + +print("########## FUEL ##########") +handler = factory.Factory.get_handler('fuel', + '10.20.0.2', + 'root', + installer_pwd='r00tme') + +print(handler.get_deployment_info()) + +print("List of nodes in cluster 4:") +nodes = handler.get_nodes({'cluster': '4'}) +for node in nodes: + print(node) diff --git a/modules/opnfv/deployment/fuel/adapter.py b/modules/opnfv/deployment/fuel/adapter.py index d53966e82..9e22ba891 100644 --- a/modules/opnfv/deployment/fuel/adapter.py +++ b/modules/opnfv/deployment/fuel/adapter.py @@ -13,7 +13,7 @@ from opnfv.deployment import manager from opnfv.utils import opnfv_logger as logger from opnfv.utils import ssh_utils -logger = logger.Logger("FuelAdapter").getLogger() +logger = logger.Logger(__name__).getLogger() class FuelAdapter(manager.DeploymentHandler): @@ -40,7 +40,7 @@ class FuelAdapter(manager.DeploymentHandler): index_name = -1 index_release_id = -1 - for i in range(len(fields) - 1): + for i in range(len(fields)): if "id" in fields[i]: index_id = i elif "status" in fields[i]: @@ -51,7 +51,7 @@ class FuelAdapter(manager.DeploymentHandler): index_release_id = i # order env info - for i in range(2, len(lines) - 1): + for i in range(2, len(lines)): fields = lines[i].rsplit(' | ') dict = {"id": fields[index_id].strip(), "status": fields[index_status].strip(), @@ -61,88 +61,116 @@ class FuelAdapter(manager.DeploymentHandler): return environments - def nodes(self, options=None): + def get_nodes(self, options=None): + + if options and options['cluster'] and len(self.nodes) > 0: + n = [] + for node in self.nodes: + if node.info['cluster'] == options['cluster']: + n.append(node) + return n + + try: + # if we have retrieved previously all the nodes, don't do it again + # This fails the first time when the constructor calls this method + # therefore the try/except + if len(self.nodes) > 0: + return self.nodes + except: + pass + nodes = [] cmd = 'fuel node' output = self.installer_node.run_cmd(cmd) lines = output.rsplit('\n') if len(lines) < 2: logger.info("No nodes found in the deployment.") - return None - else: - # get fields indexes - fields = lines[0].rsplit(' | ') - - index_id = -1 - index_status = -1 
- index_name = -1 - index_cluster = -1 - index_ip = -1 - index_mac = -1 - index_roles = -1 - index_online = -1 - - for i in range(0, len(fields) - 1): - if "id" in fields[i]: - index_id = i - elif "status" in fields[i]: - index_status = i - elif "name" in fields[i]: - index_name = i - elif "cluster" in fields[i]: - index_cluster = i - elif "ip" in fields[i]: - index_ip = i - elif "mac" in fields[i]: - index_mac = i - elif "roles " in fields[i]: - index_roles = i - elif "online" in fields[i]: - index_online = i - - # order nodes info - for i in range(2, len(lines) - 1): - fields = lines[i].rsplit(' | ') - - id = fields[index_id].strip(), - ip = fields[index_ip].strip() - status_node = fields[index_status].strip() - name = fields[index_name].strip() - roles = fields[index_roles].strip() - - dict = {"cluster": fields[index_cluster].strip(), - "mac": fields[index_mac].strip(), - "online": fields[index_online].strip()} - - if status_node == 'ready': - status = manager.Node.STATUS_OK - proxy = {'ip': self.installer_ip, - 'username': self.installer_user, - 'password': self.installer_pwd} - ssh_client = ssh_utils.get_ssh_client(hostname=ip, - username='root', - proxy=proxy) - else: - status = manager.Node.STATUS_INACTIVE - ssh_client = None - - node = manager.Node( - id, ip, name, status, roles, ssh_client, dict) + return nodes + + # get fields indexes + fields = lines[0].rsplit(' | ') + + index_id = -1 + index_status = -1 + index_name = -1 + index_cluster = -1 + index_ip = -1 + index_mac = -1 + index_roles = -1 + index_online = -1 + + for i in range(len(fields)): + if "group_id" in fields[i]: + break + elif "id" in fields[i]: + index_id = i + elif "status" in fields[i]: + index_status = i + elif "name" in fields[i]: + index_name = i + elif "cluster" in fields[i]: + index_cluster = i + elif "ip" in fields[i]: + index_ip = i + elif "mac" in fields[i]: + index_mac = i + elif "roles " in fields[i]: + index_roles = i + elif "online" in fields[i]: + index_online = i + + # order nodes info + for i in range(2, len(lines)): + fields = lines[i].rsplit(' | ') + id = fields[index_id].strip().encode() + ip = fields[index_ip].strip().encode() + status_node = fields[index_status].strip().encode().lower() + name = fields[index_name].strip().encode() + roles_all = fields[index_roles].strip().encode().lower() + + roles = [x for x in [manager.Role.CONTROLLER, + manager.Role.COMPUTE, + manager.Role.ODL] if x in roles_all] + + dict = {"cluster": fields[index_cluster].strip().encode(), + "mac": fields[index_mac].strip().encode(), + "status_node": status_node, + "online": fields[index_online].strip().encode()} + + ssh_client = None + if status_node == 'ready': + status = manager.NodeStatus.STATUS_OK + proxy = {'ip': self.installer_ip, + 'username': self.installer_user, + 'password': self.installer_pwd} + ssh_client = ssh_utils.get_ssh_client(hostname=ip, + username='root', + proxy=proxy) + elif 'error' in status_node: + status = manager.NodeStatus.STATUS_ERROR + elif 'off' in status_node: + status = manager.NodeStatus.STATUS_OFFLINE + elif 'discover' in status_node: + status = manager.NodeStatus.STATUS_UNUSED + else: + status = manager.NodeStatus.STATUS_INACTIVE + + node = manager.Node( + id, ip, name, status, roles, ssh_client, dict) + if options and options['cluster']: + if fields[index_cluster].strip() == options['cluster']: + nodes.append(node) + else: nodes.append(node) - # TODO: Add support for Fuel cluster selection - ''' - if options and options['cluster']: - if fields[index_cluster].strip() == 
options['cluster']: - ''' - + self.get_nodes_called = True return nodes def get_openstack_version(self): cmd = 'source openrc;nova-manage version 2>/dev/null' version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): version = node.run_cmd(cmd) break return version @@ -151,7 +179,7 @@ class FuelAdapter(manager.DeploymentHandler): cmd = "apt-cache show opendaylight|grep Version|sed 's/^.*\: //'" version = None for node in self.nodes: - if 'controller' in node.get_attribute('roles'): + if node.is_controller(): odl_version = node.run_cmd(cmd) if odl_version: version = 'OpenDaylight ' + odl_version diff --git a/modules/opnfv/deployment/manager.py b/modules/opnfv/deployment/manager.py index f0e442903..43a79488b 100644 --- a/modules/opnfv/deployment/manager.py +++ b/modules/opnfv/deployment/manager.py @@ -56,7 +56,7 @@ class Deployment(object): version = self.deployment_info['openstack_version'].split('.')[0] name = os_versions[version] return name - except Exception as e: + except Exception: return 'Unknown release' def get_dict(self): @@ -89,25 +89,35 @@ class Deployment(object): sdn_controller=self.deployment_info['sdn_controller']) for node in self.deployment_info['nodes']: - s += '\t\t{node_object}\n'.format(node_object=node) + s += '{node_object}\n'.format(node_object=node) return s -class Node(object): +class Role(): + CONTROLLER = 'controller' + COMPUTE = 'compute' + ODL = 'opendaylight' + ONOS = 'onos' + +class NodeStatus(): STATUS_OK = 'active' STATUS_INACTIVE = 'inactive' STATUS_OFFLINE = 'offline' - STATUS_FAILED = 'failed' + STATUS_ERROR = 'error' + STATUS_UNUSED = 'unused' + + +class Node(object): def __init__(self, id, ip, name, status, - roles, - ssh_client, + roles=[], + ssh_client=None, info={}): self.id = id self.ip = ip @@ -121,7 +131,7 @@ class Node(object): ''' SCP file from a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Fetching %s from %s" % (src, self.ip)) @@ -137,7 +147,7 @@ class Node(object): ''' SCP file to a node ''' - if self.status is not Node.STATUS_OK: + if self.status is not NodeStatus.STATUS_OK: logger.info("The node %s is not active" % self.ip) return 1 logger.info("Copying %s to %s" % (src, self.ip)) @@ -153,9 +163,9 @@ class Node(object): ''' Run command remotely on a node ''' - if self.status is not Node.STATUS_OK: - logger.info("The node %s is not active" % self.ip) - return 1 + if self.status is not NodeStatus.STATUS_OK: + logger.error("The node %s is not active" % self.ip) + return None _, stdout, stderr = (self.ssh_client.exec_command(cmd)) error = stderr.readlines() if len(error) > 0: @@ -187,7 +197,7 @@ class Node(object): ''' Returns if the node is a controller ''' - if 'controller' in self.get_attribute('roles'): + if 'controller' in self.roles: return True return False @@ -195,12 +205,32 @@ class Node(object): ''' Returns if the node is a compute ''' - if 'compute' in self.get_attribute('roles'): + if 'compute' in self.roles: return True return False + def get_ovs_info(self): + ''' + Returns the ovs version installed + ''' + cmd = "ovs-vsctl --version|head -1| sed 's/^.*) //'" + return self.run_cmd(cmd) + def __str__(self): - return str(self.get_dict()) + return ''' + name: {name} + id: {id} + ip: {ip} + status: {status} + roles: {roles} + ovs: {ovs} + info: {info}'''.format(name=self.name, + id=self.id, + ip=self.ip, + status=self.status, + roles=self.roles, + 
ovs=self.get_ovs_info(), + info=self.info) class DeploymentHandler(object): @@ -236,14 +266,14 @@ class DeploymentHandler(object): self.installer_node = Node(id='', ip=installer_ip, name=installer, - status='active', + status=NodeStatus.STATUS_OK, ssh_client=self.installer_connection, roles='installer node') else: raise Exception( 'Cannot establish connection to the installer node!') - self.nodes = self.nodes() + self.nodes = self.get_nodes() @abstractmethod def get_openstack_version(self): @@ -267,18 +297,12 @@ class DeploymentHandler(object): raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED) @abstractmethod - def nodes(self, options=None): + def get_nodes(self, options=None): ''' Generates a list of all the nodes in the deployment ''' raise Exception(DeploymentHandler.FUNCTION_NOT_IMPLEMENTED) - def get_nodes(self, options=None): - ''' - Returns the list of Node objects - ''' - return self.nodes - def get_installer_node(self): ''' Returns the installer node object @@ -296,4 +320,4 @@ class DeploymentHandler(object): pod=os.getenv('NODE_NAME', 'Unknown'), openstack_version=self.get_openstack_version(), sdn_controller=self.get_sdn_version(), - nodes=self.nodes) + nodes=self.get_nodes()) diff --git a/modules/opnfv/utils/Credentials.py b/modules/opnfv/utils/Credentials.py index 6441b841c..141ecbd93 100644 --- a/modules/opnfv/utils/Credentials.py +++ b/modules/opnfv/utils/Credentials.py @@ -77,7 +77,7 @@ class Credentials(object): creds_file = '/root/openrc' try: self.handler.get_file_from_controller(creds_file, target_path) - except Exception, e: + except Exception as e: self.logger.error( "Cannot get %s from controller. %e" % (creds_file, e)) pass diff --git a/modules/opnfv/utils/constants.py b/modules/opnfv/utils/constants.py index ed83488d4..56008c37f 100644 --- a/modules/opnfv/utils/constants.py +++ b/modules/opnfv/utils/constants.py @@ -14,6 +14,7 @@ EXIT_OK = 0 EXIT_RUN_ERROR = -1 EXIT_PUSH_TO_TEST_DB_ERROR = -2 + class Constants(object): INSTALLERS = ['apex', 'fuel', 'compass', 'joid', "daisy"] VERSIONS = ['arno', 'brahmaputra', 'colorado', 'danube'] diff --git a/modules/opnfv/utils/ovs_logger.py b/modules/opnfv/utils/ovs_logger.py index 3159609f1..75b4cec80 100644 --- a/modules/opnfv/utils/ovs_logger.py +++ b/modules/opnfv/utils/ovs_logger.py @@ -16,6 +16,7 @@ logger = OPNFVLogger.Logger('ovs_logger').getLogger() class OVSLogger(object): + def __init__(self, basedir, ft_resdir): self.ovs_dir = basedir self.ft_resdir = ft_resdir @@ -32,7 +33,7 @@ class OVSLogger(object): hosts = stdout.readline().strip().split(' ') found_host = [h for h in hosts if h.startswith(host_prefix)][0] return found_host - except Exception, e: + except Exception as e: logger.error(e) def __dump_to_file(self, operation, host, text, timestamp=None): @@ -55,7 +56,7 @@ class OVSLogger(object): .format(cmd, host)) output = ''.join(stdout.readlines()) return output - except Exception, e: + except Exception as e: logger.error('[__remote_command(ssh_client, {0})]: {1}' .format(cmd, e)) return None @@ -78,7 +79,7 @@ class OVSLogger(object): host = self.__ssh_host(ssh_conn) self.__dump_to_file(operation, host, output, timestamp=timestamp) return output - except Exception, e: + except Exception as e: logger.error('[ofctl_dump_flows(ssh_client, {0}, {1})]: {2}' .format(br, choose_table, e)) return None @@ -91,7 +92,7 @@ class OVSLogger(object): host = self.__ssh_host(ssh_conn) self.__dump_to_file(operation, host, output, timestamp=timestamp) return output - except Exception, e: + except Exception as e: 
logger.error('[vsctl_show(ssh_client)]: {0}'.format(e)) return None diff --git a/modules/opnfv/utils/ssh_utils.py b/modules/opnfv/utils/ssh_utils.py index f90045540..d17f5ae81 100644 --- a/modules/opnfv/utils/ssh_utils.py +++ b/modules/opnfv/utils/ssh_utils.py @@ -47,7 +47,7 @@ def get_ssh_client(hostname, password=password) return client - except Exception, e: + except Exception as e: logger.error(e) return None @@ -57,7 +57,7 @@ def get_file(ssh_conn, src, dest): sftp = ssh_conn.open_sftp() sftp.get(src, dest) return True - except Exception, e: + except Exception as e: logger.error("Error [get_file(ssh_conn, '%s', '%s']: %s" % (src, dest, e)) return None @@ -68,7 +68,7 @@ def put_file(ssh_conn, src, dest): sftp = ssh_conn.open_sftp() sftp.put(src, dest) return True - except Exception, e: + except Exception as e: logger.error("Error [put_file(ssh_conn, '%s', '%s']: %s" % (src, dest, e)) return None @@ -128,5 +128,5 @@ class ProxyHopClient(paramiko.SSHClient): pkey=proxy_key, sock=self.proxy_channel) os.remove(self.local_ssh_key) - except Exception, e: + except Exception as e: logger.error(e) diff --git a/utils/lab-reconfiguration/reconfigUcsNet.py b/utils/lab-reconfiguration/reconfigUcsNet.py index 4c08f3dc9..0dd902f6d 100755 --- a/utils/lab-reconfiguration/reconfigUcsNet.py +++ b/utils/lab-reconfiguration/reconfigUcsNet.py @@ -22,8 +22,10 @@ # -p PASSWORD, --password=PASSWORD # [Mandatory] Account Password for UCSM Login # -f FILE, --file=FILE -# [Optional] Yaml file with network config you want to set for POD -# If not present only current network config will be printed +# [Optional] Yaml file with network config you +# want to set for POD +# If not present only current network config +# will be printed # import getpass @@ -32,12 +34,14 @@ import platform import yaml import time import sys -from UcsSdk import * -from collections import defaultdict +from UcsSdk import LsmaintAck, LsPower, LsServer, OrgOrg +from UcsSdk import UcsHandle, VnicEther, VnicEtherIf, YesOrNo + POD_PREFIX = "POD-2" INSTALLER = "POD-21" + def getpassword(prompt): if platform.system() == "Linux": return getpass.unix_getpass(prompt=prompt) @@ -51,7 +55,8 @@ def get_servers(handle=None): """ Return list of servers """ - orgObj = handle.GetManagedObject(None, OrgOrg.ClassId(), {OrgOrg.DN : "org-root"})[0] + orgObj = handle.GetManagedObject( + None, OrgOrg.ClassId(), {OrgOrg.DN: "org-root"})[0] servers = handle.GetManagedObject(orgObj, LsServer.ClassId()) for server in servers: if server.Type == 'instance' and POD_PREFIX in server.Dn: @@ -63,10 +68,10 @@ def set_boot_policy(handle=None, server=None, policy=None): Modify Boot policy of server """ obj = handle.GetManagedObject(None, LsServer.ClassId(), { - LsServer.DN: server.Dn}) + LsServer.DN: server.Dn}) handle.SetManagedObject(obj, LsServer.ClassId(), { - LsServer.BOOT_POLICY_NAME: policy} ) - print " Configured boot policy: {}".format(policy) + LsServer.BOOT_POLICY_NAME: policy}) + print(" Configured boot policy: {}".format(policy)) def ack_pending(handle=None, server=None): @@ -74,30 +79,32 @@ def ack_pending(handle=None, server=None): Acknowledge pending state of server """ handle.AddManagedObject(server, LsmaintAck.ClassId(), { - LsmaintAck.DN: server.Dn + "/ack", - LsmaintAck.DESCR:"", - LsmaintAck.ADMIN_STATE:"trigger-immediate", - LsmaintAck.SCHEDULER:"", - LsmaintAck.POLICY_OWNER:"local"}, True) - print " Pending-reboot -> Acknowledged." 
+ LsmaintAck.DN: server.Dn + "/ack", + LsmaintAck.DESCR: "", + LsmaintAck.ADMIN_STATE: "trigger-immediate", + LsmaintAck.SCHEDULER: "", + LsmaintAck.POLICY_OWNER: "local"}, True) + print(" Pending-reboot -> Acknowledged.") def boot_server(handle=None, server=None): """ Boot server (when is in power-off state) """ - obj = handle.GetManagedObject(None, LsServer.ClassId(), {LsServer.DN: server.Dn}) + obj = handle.GetManagedObject( + None, LsServer.ClassId(), {LsServer.DN: server.Dn}) handle.AddManagedObject(obj, LsPower.ClassId(), { - LsPower.DN: server.Dn + "/power", - LsPower.STATE:"admin-up"}, True) - print " Booting." + LsPower.DN: server.Dn + "/power", + LsPower.STATE: "admin-up"}, True) + print(" Booting.") def get_vnics(handle=None, server=None): """ Return list of vnics for given server """ - vnics = handle.ConfigResolveChildren(VnicEther.ClassId(), server.Dn, None, YesOrNo.TRUE) + vnics = handle.ConfigResolveChildren( + VnicEther.ClassId(), server.Dn, None, YesOrNo.TRUE) return vnics.OutConfigs.GetChild() @@ -105,28 +112,36 @@ def get_network_config(handle=None): """ Print current network config """ - print "\nCURRENT NETWORK CONFIG:" - print " d - default, t - tagged" + print("\nCURRENT NETWORK CONFIG:") + print(" d - default, t - tagged") for server in get_servers(handle): - print ' {}'.format(server.Name) - print ' Boot policy: {}'.format(server.OperBootPolicyName) + print(' {}'.format(server.Name)) + print(' Boot policy: {}'.format(server.OperBootPolicyName)) for vnic in get_vnics(handle, server): - print ' {}'.format(vnic.Name) - print ' {}'.format(vnic.Addr) - vnicIfs = handle.ConfigResolveChildren(VnicEtherIf.ClassId(), vnic.Dn, None, YesOrNo.TRUE) + print(' {}'.format(vnic.Name)) + print(' {}'.format(vnic.Addr)) + vnicIfs = handle.ConfigResolveChildren( + VnicEtherIf.ClassId(), vnic.Dn, None, YesOrNo.TRUE) for vnicIf in vnicIfs.OutConfigs.GetChild(): if vnicIf.DefaultNet == 'yes': - print ' Vlan: {}d'.format(vnicIf.Vnet) + print(' Vlan: {}d'.format(vnicIf.Vnet)) else: - print ' Vlan: {}t'.format(vnicIf.Vnet) + print(' Vlan: {}t'.format(vnicIf.Vnet)) -def add_interface(handle=None, lsServerDn=None, vnicEther=None, templName=None, order=None, macAddr=None): +def add_interface(handle=None, + lsServerDn=None, + vnicEther=None, + templName=None, + order=None, + macAddr=None): """ Add interface to server specified by server.DN name """ - print " Adding interface: {}, template: {}, server.Dn: {}".format(vnicEther, templName, lsServerDn) - obj = handle.GetManagedObject(None, LsServer.ClassId(), {LsServer.DN:lsServerDn}) + print(" Adding interface: {}, template: {}, server.Dn: {}".format( + vnicEther, templName, lsServerDn)) + obj = handle.GetManagedObject( + None, LsServer.ClassId(), {LsServer.DN: lsServerDn}) vnicEtherDn = lsServerDn + "/ether-" + vnicEther params = { VnicEther.STATS_POLICY_NAME: "default", @@ -146,8 +161,9 @@ def remove_interface(handle=None, vnicEtherDn=None): """ Remove interface specified by Distinguished Name (vnicEtherDn) """ - print " Removing interface: {}".format(vnicEtherDn) - obj = handle.GetManagedObject(None, VnicEther.ClassId(), {VnicEther.DN:vnicEtherDn}) + print(" Removing interface: {}".format(vnicEtherDn)) + obj = handle.GetManagedObject( + None, VnicEther.ClassId(), {VnicEther.DN: vnicEtherDn}) handle.RemoveManagedObject(obj) @@ -165,32 +181,37 @@ def set_network(handle=None, yamlFile=None): Configure VLANs on POD according specified network """ # add interfaces and bind them with vNIC templates - print "\nRECONFIGURING VNICs..." 
+ print("\nRECONFIGURING VNICs...") pod_data = read_yaml_file(yamlFile) network = pod_data['network'] for index, server in enumerate(get_servers(handle)): # Assign template to interface for iface, data in network.iteritems(): - add_interface(handle, server.Dn, iface, data['template'], data['order'], data['mac-list'][index]) + add_interface(handle, server.Dn, iface, data['template'], data[ + 'order'], data['mac-list'][index]) - # Remove other interfaces which have not assigned required vnic template + # Remove other interfaces which have not assigned required vnic + # template vnics = get_vnics(handle, server) for vnic in vnics: - if not any(data['template'] in vnic.OperNwTemplName for iface, data in network.iteritems()): + if not any(data['template'] in vnic.OperNwTemplName for + iface, data in network.iteritems()): remove_interface(handle, vnic.Dn) - print " {} removed, template: {}".format(vnic.Name, vnic.OperNwTemplName) + print(" {} removed, template: {}".format( + vnic.Name, vnic.OperNwTemplName)) # Set boot policy template - if not INSTALLER in server.Dn: + if INSTALLER not in server.Dn: set_boot_policy(handle, server, pod_data['boot-policy']) if __name__ == "__main__": - print "\n*** SKIPING RECONFIGURATION.***\n" + print("\n*** SKIPING RECONFIGURATION.***\n") sys.exit(0) # Latest urllib2 validate certs by default - # The process wide "revert to the old behaviour" hook is to monkeypatch the ssl module + # The process wide "revert to the old behaviour" hook is to monkeypatch + # the ssl module # https://bugs.python.org/issue22417 import ssl if hasattr(ssl, '_create_unverified_context'): @@ -198,14 +219,15 @@ if __name__ == "__main__": try: handle = UcsHandle() parser = optparse.OptionParser() - parser.add_option('-i', '--ip',dest="ip", - help="[Mandatory] UCSM IP Address") - parser.add_option('-u', '--username',dest="userName", - help="[Mandatory] Account Username for UCSM Login") - parser.add_option('-p', '--password',dest="password", - help="[Mandatory] Account Password for UCSM Login") - parser.add_option('-f', '--file',dest="yamlFile", - help="[Optional] Yaml file contains network config you want to set on UCS POD1") + parser.add_option('-i', '--ip', dest="ip", + help="[Mandatory] UCSM IP Address") + parser.add_option('-u', '--username', dest="userName", + help="[Mandatory] Account Username for UCSM Login") + parser.add_option('-p', '--password', dest="password", + help="[Mandatory] Account Password for UCSM Login") + parser.add_option('-f', '--file', dest="yamlFile", + help=("[Optional] Yaml file contains network " + "config you want to set on UCS POD1")) (options, args) = parser.parse_args() if not options.ip: @@ -215,26 +237,27 @@ if __name__ == "__main__": parser.print_help() parser.error("Provide UCSM UserName") if not options.password: - options.password=getpassword("UCSM Password:") + options.password = getpassword("UCSM Password:") handle.Login(options.ip, options.userName, options.password) # Change vnic template if specified in cli option - if (options.yamlFile != None): + if (options.yamlFile is not None): set_network(handle, options.yamlFile) time.sleep(5) - print "\nWait until Overall Status of all nodes is OK..." 
- timeout = time.time() + 60*10 #10 minutes timeout + print("\nWait until Overall Status of all nodes is OK...") + timeout = time.time() + 60 * 10 # 10 minutes timeout while True: list_of_states = [] for server in get_servers(handle): if server.OperState == "power-off": - boot_server(handle,server) + boot_server(handle, server) if server.OperState == "pending-reboot": - ack_pending(handle,server) + ack_pending(handle, server) list_of_states.append(server.OperState) - print " {}, {} seconds remains.".format(list_of_states, round(timeout-time.time())) + print(" {}, {} seconds remains.".format( + list_of_states, round(timeout - time.time()))) if all(state == "ok" for state in list_of_states): break if time.time() > timeout: @@ -246,11 +269,12 @@ if __name__ == "__main__": handle.Logout() - except Exception, err: + except Exception as err: handle.Logout() - print "Exception:", str(err) - import traceback, sys - print '-'*60 + print("Exception:", str(err)) + import traceback + import sys + print('-' * 60) traceback.print_exc(file=sys.stdout) - print '-'*60 + print('-' * 60) sys.exit(1) diff --git a/utils/opnfv-artifacts.py b/utils/opnfv-artifacts.py index 876efedba..2f2cc41ba 100644 --- a/utils/opnfv-artifacts.py +++ b/utils/opnfv-artifacts.py @@ -28,56 +28,55 @@ from apiclient.errors import HttpError import argparse import json -import os import sys api = { - 'projects': {}, - 'docs': {}, - 'releases': {}, + 'projects': {}, + 'docs': {}, + 'releases': {}, } releases = [ - 'arno.2015.1.0', - 'arno.2015.2.0', - 'brahmaputra.1.0', + 'arno.2015.1.0', + 'arno.2015.2.0', + 'brahmaputra.1.0', ] # List of file extensions to filter out ignore_extensions = [ - '.buildinfo', - '.woff', - '.ttf', - '.svg', - '.eot', - '.pickle', - '.doctree', - '.js', - '.png', - '.css', - '.gif', - '.jpeg', - '.jpg', - '.bmp', + '.buildinfo', + '.woff', + '.ttf', + '.svg', + '.eot', + '.pickle', + '.doctree', + '.js', + '.png', + '.css', + '.gif', + '.jpeg', + '.jpg', + '.bmp', ] parser = argparse.ArgumentParser( - description='OPNFV Artifacts JSON Generator') + description='OPNFV Artifacts JSON Generator') parser.add_argument( - '-k', - dest='key', - default='', - help='API Key for Google Cloud Storage') + '-k', + dest='key', + default='', + help='API Key for Google Cloud Storage') parser.add_argument( - '-p', - default=None, - dest='pretty', - action='store_const', - const=2, - help='pretty print the output') + '-p', + default=None, + dest='pretty', + action='store_const', + const=2, + help='pretty print the output') # Parse and assign arguments args = parser.parse_args() @@ -130,7 +129,6 @@ def has_logs(gerrit_review): return False - def has_ignorable_extension(filename): for extension in ignore_extensions: if filename.lower().endswith(extension): @@ -148,11 +146,11 @@ def get_results(key): files = storage.objects().list(bucket='artifacts.opnfv.org', fields='nextPageToken,' 'items(' - 'name,' - 'mediaLink,' - 'updated,' - 'contentType,' - 'size' + 'name,' + 'mediaLink,' + 'updated,' + 'contentType,' + 'size' ')') while (files is not None): sites = files.execute() @@ -173,7 +171,8 @@ def get_results(key): project = site_split[0] name = '/'.join(site_split[1:]) - proxy = "http://build.opnfv.org/artifacts.opnfv.org/%s" % site['name'] + proxy = "http://build.opnfv.org/artifacts.opnfv.org/%s" % site[ + 'name'] if name.endswith('.html'): href = "http://artifacts.opnfv.org/%s" % site['name'] href_type = 'view' @@ -183,7 +182,7 @@ def get_results(key): gerrit = has_gerrit_review(site_split) logs = False # has_logs(gerrit) - 
documentation = has_documentation(site_split) + # documentation = has_documentation(site_split) release = has_release(site_split) category = 'project' diff --git a/utils/push-test-logs.sh b/utils/push-test-logs.sh index 09861c45f..5e428d07b 100644 --- a/utils/push-test-logs.sh +++ b/utils/push-test-logs.sh @@ -28,7 +28,7 @@ node_list=(\ 'ericsson-virtual4' 'ericsson-virtual5' \ 'arm-pod1' 'arm-pod3' \ 'huawei-pod1' 'huawei-pod2' 'huawei-pod3' 'huawei-pod4' 'huawei-pod5' \ -'huawei-pod6' 'huawei-pod7' 'huawei-pod12'\ +'huawei-pod6' 'huawei-pod7' 'huawei-pod12' \ 'huawei-virtual1' 'huawei-virtual2' 'huawei-virtual3' 'huawei-virtual4') diff --git a/utils/test/dashboard/dashboard/common/elastic_access.py b/utils/test/dashboard/dashboard/common/elastic_access.py index aaf776f7a..eb29ce879 100644 --- a/utils/test/dashboard/dashboard/common/elastic_access.py +++ b/utils/test/dashboard/dashboard/common/elastic_access.py @@ -30,7 +30,7 @@ def publish_docs(url, creds=None, body=None): def _get_docs_nr(url, creds=None, body=None): res_data = _get('{}/_search?size=0'.format(url), creds=creds, body=body) - print type(res_data), res_data + print(type(res_data), res_data) return res_data['hits']['total'] diff --git a/utils/test/dashboard/dashboard/conf/testcases.py b/utils/test/dashboard/dashboard/conf/testcases.py index ff801b4c9..98ce20984 100644 --- a/utils/test/dashboard/dashboard/conf/testcases.py +++ b/utils/test/dashboard/dashboard/conf/testcases.py @@ -21,4 +21,4 @@ def get_format(project, case): if __name__ == '__main__': fmt = get_format('functest', 'vping_ssh') - print fmt + print(fmt) diff --git a/utils/test/dashboard/dashboard/elastic2kibana/utility.py b/utils/test/dashboard/dashboard/elastic2kibana/utility.py index 55578bd8c..40d9202a6 100644 --- a/utils/test/dashboard/dashboard/elastic2kibana/utility.py +++ b/utils/test/dashboard/dashboard/elastic2kibana/utility.py @@ -2,7 +2,8 @@ import json from jinja2 import Environment, PackageLoader -env = Environment(loader=PackageLoader('dashboard', 'elastic2kibana/templates')) +env = Environment(loader=PackageLoader('dashboard', + 'elastic2kibana/templates')) env.filters['jsonify'] = json.dumps diff --git a/utils/test/dashboard/dashboard/functest/format.py b/utils/test/dashboard/dashboard/functest/format.py index ef485bae0..75d361ff8 100644 --- a/utils/test/dashboard/dashboard/functest/format.py +++ b/utils/test/dashboard/dashboard/functest/format.py @@ -6,7 +6,8 @@ def _convert_value(value): def _convert_duration(duration): - if (isinstance(duration, str) or isinstance(duration, unicode)) and ':' in duration: + if ((isinstance(duration, str) or + isinstance(duration, unicode)) and ':' in duration): hours, minutes, seconds = duration.split(":") hours = _convert_value(hours) minutes = _convert_value(minutes) @@ -42,11 +43,11 @@ def format_normal(testcase): testcase_tests = float(testcase_details['tests']) testcase_failures = float(testcase_details['failures']) if testcase_tests != 0: - testcase_details['success_percentage'] = 100 * (testcase_tests - testcase_failures) / testcase_tests + testcase_details['success_percentage'] = 100 * \ + (testcase_tests - testcase_failures) / testcase_tests else: testcase_details['success_percentage'] = 0 - return found @@ -115,28 +116,33 @@ def format_onos(testcase): """ testcase_details = testcase['details'] - if 'FUNCvirNet' not in testcase_details or 'FUNCvirNetL3' not in testcase_details: + if ('FUNCvirNet' not in testcase_details or + 'FUNCvirNetL3' not in testcase_details): return False funcvirnet_details = 
testcase_details['FUNCvirNet']['status'] - funcvirnet_stats = _get_statistics(funcvirnet_details, ('Case result',), ('PASS', 'FAIL')) + funcvirnet_stats = _get_statistics( + funcvirnet_details, ('Case result',), ('PASS', 'FAIL')) funcvirnet_passed = funcvirnet_stats['PASS'] funcvirnet_failed = funcvirnet_stats['FAIL'] funcvirnet_all = funcvirnet_passed + funcvirnet_failed funcvirnetl3_details = testcase_details['FUNCvirNetL3']['status'] - funcvirnetl3_stats = _get_statistics(funcvirnetl3_details, ('Case result',), ('PASS', 'FAIL')) + funcvirnetl3_stats = _get_statistics( + funcvirnetl3_details, ('Case result',), ('PASS', 'FAIL')) funcvirnetl3_passed = funcvirnetl3_stats['PASS'] funcvirnetl3_failed = funcvirnetl3_stats['FAIL'] funcvirnetl3_all = funcvirnetl3_passed + funcvirnetl3_failed testcase_details['FUNCvirNet'] = { - 'duration': _convert_duration(testcase_details['FUNCvirNet']['duration']), + 'duration': + _convert_duration(testcase_details['FUNCvirNet']['duration']), 'tests': funcvirnet_all, 'failures': funcvirnet_failed } testcase_details['FUNCvirNetL3'] = { - 'duration': _convert_duration(testcase_details['FUNCvirNetL3']['duration']), + 'duration': + _convert_duration(testcase_details['FUNCvirNetL3']['duration']), 'tests': funcvirnetl3_all, 'failures': funcvirnetl3_failed } diff --git a/utils/test/dashboard/dashboard/mongo2elastic/main.py b/utils/test/dashboard/dashboard/mongo2elastic/main.py index 688f55f7d..e33252df2 100644 --- a/utils/test/dashboard/dashboard/mongo2elastic/main.py +++ b/utils/test/dashboard/dashboard/mongo2elastic/main.py @@ -27,7 +27,8 @@ parser.add_argument('-ld', '--latest-days', metavar='N', help='get entries old at most N days from mongodb and' ' parse those that are not already in elasticsearch.' - ' If not present, will get everything from mongodb, which is the default') + ' If not present, will get everything from mongodb,' + ' which is the default') args = parser.parse_args() CONF = APIConfig().parse(args.config_file) @@ -37,6 +38,7 @@ tmp_docs_file = './mongo-{}.json'.format(uuid.uuid4()) class DocumentVerification(object): + def __init__(self, doc): super(DocumentVerification, self).__init__() self.doc = doc @@ -55,8 +57,8 @@ class DocumentVerification(object): for key, value in self.doc.items(): if key in mandatory_fields: if value is None: - logger.info("Skip testcase '%s' because field '%s' missing" % - (self.doc_id, key)) + logger.info("Skip testcase '%s' because field " + "'%s' missing" % (self.doc_id, key)) self.skip = True else: mandatory_fields.remove(key) @@ -131,10 +133,12 @@ class DocumentPublisher(object): self._publish() def _publish(self): - status, data = elastic_access.publish_docs(self.elastic_url, self.creds, self.doc) + status, data = elastic_access.publish_docs( + self.elastic_url, self.creds, self.doc) if status > 300: logger.error('Publish record[{}] failed, due to [{}]' - .format(self.doc, json.loads(data)['error']['reason'])) + .format(self.doc, + json.loads(data)['error']['reason'])) def _fix_date(self, date_string): if isinstance(date_string, dict): @@ -163,7 +167,8 @@ class DocumentsPublisher(object): def export(self): if self.days > 0: - past_time = datetime.datetime.today() - datetime.timedelta(days=self.days) + past_time = datetime.datetime.today( + ) - datetime.timedelta(days=self.days) query = '''{{ "project_name": "{}", "case_name": "{}", @@ -182,7 +187,7 @@ class DocumentsPublisher(object): try: subprocess.check_call(cmd) return self - except Exception, err: + except Exception as err: logger.error("export mongodb 
failed: %s" % err) self._remove() exit(-1) @@ -217,7 +222,8 @@ class DocumentsPublisher(object): }}'''.format(self.project, self.case, self.days) else: raise Exception('Update days must be non-negative') - self.existed_docs = elastic_access.get_docs(self.elastic_url, self.creds, body) + self.existed_docs = elastic_access.get_docs( + self.elastic_url, self.creds, body) return self def publish(self): diff --git a/utils/test/dashboard/kibana_cleanup.py b/utils/test/dashboard/kibana_cleanup.py index ee0190049..7e3662c29 100644 --- a/utils/test/dashboard/kibana_cleanup.py +++ b/utils/test/dashboard/kibana_cleanup.py @@ -9,7 +9,8 @@ from dashboard.common import elastic_access logger = logging.getLogger('clear_kibana') logger.setLevel(logging.DEBUG) file_handler = logging.FileHandler('/var/log/{}.log'.format('clear_kibana')) -file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s')) +file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: ' + '%(message)s')) logger.addHandler(file_handler) @@ -21,12 +22,17 @@ def delete_all(url, es_creds): if __name__ == '__main__': - parser = argparse.ArgumentParser(description='Delete saved kibana searches, visualizations and dashboards') - parser.add_argument('-e', '--elasticsearch-url', default='http://localhost:9200', - help='the url of elasticsearch, defaults to http://localhost:9200') + parser = argparse.ArgumentParser( + description=('Delete saved kibana searches, ' + 'visualizations and dashboards')) + parser.add_argument('-e', '--elasticsearch-url', + default='http://localhost:9200', + help=('the url of elasticsearch, ' + 'defaults to http://localhost:9200')) parser.add_argument('-u', '--elasticsearch-username', default=None, - help='The username with password for elasticsearch in format username:password') + help=('The username with password for elasticsearch ' + 'in format username:password')) args = parser.parse_args() base_elastic_url = args.elasticsearch_url @@ -38,4 +44,3 @@ if __name__ == '__main__': for url in urls: delete_all(url, es_creds) - diff --git a/utils/test/reporting/functest/reporting-status.py b/utils/test/reporting/functest/reporting-status.py index 158ee597b..df5632335 100755 --- a/utils/test/reporting/functest/reporting-status.py +++ b/utils/test/reporting/functest/reporting-status.py @@ -61,13 +61,13 @@ logger.info("*******************************************") # Retrieve test cases of Tier 1 (smoke) config_tiers = functest_yaml_config.get("tiers") -# we consider Tier 1 (smoke),2 (features) +# we consider Tier 0 (Healthcheck), Tier 1 (smoke),2 (features) # to validate scenarios -# Tier > 4 are not used to validate scenarios but we display the results anyway +# Tier > 2 are not used to validate scenarios but we display the results anyway # tricky thing for the API as some tests are Functest tests # other tests are declared directly in the feature projects for tier in config_tiers: - if tier['order'] > 0 and tier['order'] < 2: + if tier['order'] >= 0 and tier['order'] < 2: for case in tier['testcases']: if case['name'] not in blacklist: testValid.append(tc.TestCase(case['name'], diff --git a/utils/test/reporting/functest/reporting-tempest.py b/utils/test/reporting/functest/reporting-tempest.py index 5d6bcc062..6e6585a32 100755 --- a/utils/test/reporting/functest/reporting-tempest.py +++ b/utils/test/reporting/functest/reporting-tempest.py @@ -44,7 +44,7 @@ for version in rp_utils.get_config('general.versions'): response = urlopen(request) k = response.read() results = json.loads(k) - 
except URLError, e: + except URLError as e: logger.error("Error code: %s" % e) test_results = results['results'] @@ -73,9 +73,9 @@ for version in rp_utils.get_config('general.versions'): nb_tests_run = result['details']['tests'] nb_tests_failed = result['details']['failures'] if nb_tests_run != 0: - success_rate = 100*((int(nb_tests_run) - - int(nb_tests_failed)) / - int(nb_tests_run)) + success_rate = 100 * ((int(nb_tests_run) - + int(nb_tests_failed)) / + int(nb_tests_run)) else: success_rate = 0 diff --git a/utils/test/reporting/functest/reporting-vims.py b/utils/test/reporting/functest/reporting-vims.py index 2077d2a4a..b236b8963 100755 --- a/utils/test/reporting/functest/reporting-vims.py +++ b/utils/test/reporting/functest/reporting-vims.py @@ -51,7 +51,7 @@ for version in versions: response = urlopen(request) k = response.read() results = json.loads(k) - except URLError, e: + except URLError as e: logger.error("Error code: %s" % e) test_results = results['results'] @@ -91,7 +91,7 @@ for version in versions: result['pr_step_ok'] = 0 if nb_step != 0: - result['pr_step_ok'] = (float(nb_step_ok)/nb_step)*100 + result['pr_step_ok'] = (float(nb_step_ok) / nb_step) * 100 try: logger.debug("Scenario %s, Installer %s" % (s_result[1]['scenario'], installer)) diff --git a/utils/test/reporting/functest/testCase.py b/utils/test/reporting/functest/testCase.py index 8d90fc861..22196c86b 100644 --- a/utils/test/reporting/functest/testCase.py +++ b/utils/test/reporting/functest/testCase.py @@ -36,14 +36,15 @@ class TestCase(object): 'moon': 'Moon', 'copper': 'Copper', 'security_scan': 'Security', - 'multisite':'Multisite', - 'domino':'Domino', - 'odl-sfc':'SFC', - 'onos_sfc':'SFC', - 'parser':'Parser', - 'connection_check':'Health (connection)', - 'api_check':'Health (api)', - 'snaps_smoke':'SNAPS' } + 'multisite': 'Multisite', + 'domino': 'Domino', + 'odl-sfc': 'SFC', + 'onos_sfc': 'SFC', + 'parser': 'Parser', + 'connection_check': 'Health (connection)', + 'api_check': 'Health (api)', + 'snaps_smoke': 'SNAPS', + 'snaps_health_check': 'Health (dhcp)'} try: self.displayName = display_name_matrix[self.name] except: @@ -131,14 +132,15 @@ class TestCase(object): 'moon': 'moon_authentication', 'copper': 'copper-notification', 'security_scan': 'security', - 'multisite':'multisite', - 'domino':'domino-multinode', - 'odl-sfc':'functest-odl-sfc', - 'onos_sfc':'onos_sfc', - 'parser':'parser-basics', - 'connection_check':'connection_check', - 'api_check':'api_check', - 'snaps_smoke':'snaps_smoke' + 'multisite': 'multisite', + 'domino': 'domino-multinode', + 'odl-sfc': 'functest-odl-sfc', + 'onos_sfc': 'onos_sfc', + 'parser': 'parser-basics', + 'connection_check': 'connection_check', + 'api_check': 'api_check', + 'snaps_smoke': 'snaps_smoke', + 'snaps_health_check': 'snaps_health_check' } try: return test_match_matrix[self.name] @@ -147,4 +149,3 @@ class TestCase(object): def getDisplayName(self): return self.displayName - diff --git a/utils/test/reporting/reporting.yaml b/utils/test/reporting/reporting.yaml index 9db0890b2..2fb6b7831 100644 --- a/utils/test/reporting/reporting.yaml +++ b/utils/test/reporting/reporting.yaml @@ -36,12 +36,20 @@ functest: - ovno - security_scan - rally_sanity + - healthcheck + - odl_netvirt + - aaa + - cloudify_ims + - orchestra_ims + - juju_epc + - orchestra + - promise max_scenario_criteria: 50 test_conf: https://git.opnfv.org/cgit/functest/plain/functest/ci/testcases.yaml log_level: ERROR jenkins_url: https://build.opnfv.org/ci/view/functest/job/ exclude_noha: False - 
exclude_virtual: True + exclude_virtual: False yardstick: test_conf: https://git.opnfv.org/cgit/yardstick/plain/tests/ci/report_config.yaml diff --git a/utils/test/reporting/utils/reporting_utils.py b/utils/test/reporting/utils/reporting_utils.py index da979531b..1879fb628 100644 --- a/utils/test/reporting/utils/reporting_utils.py +++ b/utils/test/reporting/utils/reporting_utils.py @@ -93,8 +93,8 @@ def getApiResults(case, installer, scenario, version): response = urlopen(request) k = response.read() results = json.loads(k) - except URLError, e: - print 'No kittez. Got an error code:', e + except URLError as e: + print('No kittez. Got an error code:', e) return results @@ -115,8 +115,8 @@ def getScenarios(case, installer, version): k = response.read() results = json.loads(k) test_results = results['results'] - except URLError, e: - print 'Got an error code:', e + except URLError as e: + print('Got an error code:', e) if test_results is not None: test_results.reverse() @@ -132,8 +132,8 @@ def getScenarios(case, installer, version): exclude_virtual_pod = get_config('functest.exclude_virtual') exclude_noha = get_config('functest.exclude_noha') if ((exclude_virtual_pod and "virtual" in r['pod_name']) or - (exclude_noha and "noha" in r['scenario'])): - print "exclude virtual pod results..." + (exclude_noha and "noha" in r['scenario'])): + print("exclude virtual pod results...") else: scenario_results[r['scenario']].append(r) @@ -164,8 +164,8 @@ def getScenarioStatus(installer, version): response.close() results = json.loads(k) test_results = results['results'] - except URLError, e: - print 'Got an error code:', e + except URLError as e: + print('Got an error code:', e) scenario_results = {} result_dict = {} @@ -198,7 +198,7 @@ def getNbtestOk(results): if "PASS" in v: nb_test_ok += 1 except: - print "Cannot retrieve test status" + print("Cannot retrieve test status") return nb_test_ok @@ -269,10 +269,11 @@ def getJenkinsUrl(build_tag): url_base = get_config('functest.jenkins_url') try: build_id = [int(s) for s in build_tag.split("-") if s.isdigit()] - url_id = build_tag[8:-(len(build_id)+3)] + "/" + str(build_id[0]) + url_id = (build_tag[8:-(len(str(build_id[0])) + 1)] + + "/" + str(build_id[0])) jenkins_url = url_base + url_id + "/console" except: - print 'Impossible to get jenkins url:' + print('Impossible to get jenkins url:') return jenkins_url @@ -282,7 +283,7 @@ def getScenarioPercent(scenario_score, scenario_criteria): try: score = float(scenario_score) / float(scenario_criteria) * 100 except: - print 'Impossible to calculate the percentage score' + print('Impossible to calculate the percentage score') return score @@ -330,8 +331,8 @@ def get_percent(four_list, ten_list): def _test(): status = getScenarioStatus("compass", "master") - print "status:++++++++++++++++++++++++" - print json.dumps(status, indent=4) + print("status:++++++++++++++++++++++++") + print(json.dumps(status, indent=4)) # ---------------------------------------------------------- diff --git a/utils/test/testapi/htmlize/htmlize.py b/utils/test/testapi/htmlize/htmlize.py index 075e31f79..b8c4fb43f 100644 --- a/utils/test/testapi/htmlize/htmlize.py +++ b/utils/test/testapi/htmlize/htmlize.py @@ -39,12 +39,14 @@ if __name__ == '__main__': parser.add_argument('-ru', '--resource-listing-url', type=str, required=False, - default='http://testresults.opnfv.org/test/swagger/spec.json', + default=('http://testresults.opnfv.org' + '/test/swagger/spec.json'), help='Resource Listing Spec File') parser.add_argument('-au', 
'--api-declaration-url', type=str, required=False, - default='http://testresults.opnfv.org/test/swagger/spec', + default=('http://testresults.opnfv.org' '/test/swagger/spec'), help='API Declaration Spec File') parser.add_argument('-o', '--output-directory', required=True, diff --git a/utils/test/testapi/update/templates/utils.py b/utils/test/testapi/update/templates/utils.py index a18ff0389..4254fee34 100644 --- a/utils/test/testapi/update/templates/utils.py +++ b/utils/test/testapi/update/templates/utils.py @@ -44,5 +44,5 @@ def main(method, parser): args = parser.parse_args() try: method(args) - except AssertionError, msg: + except AssertionError as msg: print(msg)
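
The largest change in this diff is the modules/opnfv/deployment refactor: raw strings such as 'controller' and 'active' are replaced by the Role and NodeStatus constant classes, and role checks move behind Node.is_controller()/is_compute(). A minimal, self-contained sketch of that pattern follows; the class and constant names are taken from the diff, but the trimmed-down Node below is illustrative only, not the full manager.Node.

    # Sketch of the Role/NodeStatus pattern from modules/opnfv/deployment/manager.py.
    # The Node class here is a stripped-down stand-in for illustration.


    class Role(object):
        CONTROLLER = 'controller'
        COMPUTE = 'compute'
        ODL = 'opendaylight'
        ONOS = 'onos'


    class NodeStatus(object):
        STATUS_OK = 'active'
        STATUS_INACTIVE = 'inactive'
        STATUS_OFFLINE = 'offline'
        STATUS_ERROR = 'error'
        STATUS_UNUSED = 'unused'


    class Node(object):
        def __init__(self, name, status, roles=None):
            self.name = name
            self.status = status
            self.roles = roles or []

        def is_controller(self):
            # Membership test against Role constants instead of raw strings
            return Role.CONTROLLER in self.roles

        def is_active(self):
            return self.status == NodeStatus.STATUS_OK


    if __name__ == '__main__':
        node = Node('overcloud-controller-0', NodeStatus.STATUS_OK,
                    [Role.CONTROLLER, Role.ODL])
        print(node.is_controller())  # True
        print(node.is_active())      # True

Keeping the status and role vocabularies in one place lets the Apex and Fuel adapters map installer-specific output ('ready', 'discover', nova/fuel role columns) onto a shared set of constants that the rest of the tooling can compare against safely.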
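A recurring small fix throughout the diff (ssh_utils.py, ovs_logger.py, Credentials.py, reconfigUcsNet.py, and the reporting and dashboard scripts) is the exception-handling syntax: 'except Exception, e' is Python 2-only and a SyntaxError under Python 3, while 'except Exception as e' is accepted by both. A two-line illustration of the surviving form:

    try:
        1 / 0
    except ZeroDivisionError as e:  # valid on Python 2.6+ and Python 3
        print('caught: %s' % e)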