-rwxr-xr-x | ci/exec_test.sh | 13
-rw-r--r-- | ci/testcases.yaml | 31
-rw-r--r-- | ci/tier_handler.py | 13
-rw-r--r-- | docs/userguide/troubleshooting.rst | 2
-rw-r--r-- | testcases/Controllers/ODL/odlreport2db.py | 10
-rw-r--r-- | testcases/Controllers/ONOS/Teston/adapters/environment.py | 2
-rwxr-xr-x | testcases/OpenStack/healthcheck/healthcheck.sh | 13
-rw-r--r-- | testcases/OpenStack/tempest/run_tempest.py | 82
-rw-r--r-- | testcases/SECTests/OpenSCAP.py | 225
-rw-r--r-- | testcases/SECTests/connect.py | 103
-rw-r--r-- | testcases/features/bgpvpn.py | 10
-rw-r--r-- | testcases/security_scan/config.ini | 25
-rw-r--r-- | testcases/security_scan/connect.py | 211
-rw-r--r-- | testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini | 25
-rw-r--r-- | testcases/security_scan/examples/xccdf-standard.ini | 25
-rw-r--r-- | testcases/security_scan/scripts/createfiles.py (renamed from testcases/SECTests/scripts/createfiles.py) | 0
-rw-r--r-- | testcases/security_scan/security_scan.py | 178
-rw-r--r-- | utils/functest_utils.py | 2
-rw-r--r-- | utils/openstack_utils.py | 56
19 files changed, 596 insertions, 430 deletions
diff --git a/ci/exec_test.sh b/ci/exec_test.sh index 0bb8389d5..291639715 100755 --- a/ci/exec_test.sh +++ b/ci/exec_test.sh @@ -83,7 +83,10 @@ function run_test(){ ${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/start_tests.sh # push results to the DB in case of CI - if [[ -n "$DEPLOY_SCENARIO" && "$DEPLOY_SCENARIO" != "none" ]]; then + if [[ "$report" == "-r" && + -n "$DEPLOY_SCENARIO" && "$DEPLOY_SCENARIO" != "none" && + -n "$INSTALLER_TYPE" && "$INSTALLER_TYPE" != "none" ]] && + env | grep NODE_NAME > /dev/null; then odl_logs="/home/opnfv/functest/results/odl/logs/2" odl_path="${FUNCTEST_REPO_DIR}/testcases/Controllers/ODL/" node_name=$(env | grep NODE_NAME | cut -f2 -d'=') @@ -126,7 +129,13 @@ function run_test(){ python ${FUNCTEST_REPO_DIR}/testcases/features/doctor.py ;; "ovno") - ${repos_dir}/ovno/Testcases/RunTests.sh + # suite under rewritting for colorado + # no need to run anything until refactoring done + # ${repos_dir}/ovno/Testcases/RunTests.sh + ;; + "security_scan") + # To be enabled once verified manually + # python ${FUNCTEST_REPO_DIR}/Testcases/security_scan/security_scan.py --config config.ini ;; *) echo "The test case '${test_name}' does not exist." diff --git a/ci/testcases.yaml b/ci/testcases.yaml index 663dc2aa6..4cbc00add 100644 --- a/ci/testcases.yaml +++ b/ci/testcases.yaml @@ -63,16 +63,6 @@ tiers: installer: '' scenario: '' - #- - # name: security_groups - # description: >- - # This test case verifies the functionality of the OpenStack - # security groups and that the port rules created are - # fullfilled. - # dependencies: - # installer: '' - # scenario: '' - - name: sdn_suites order: 2 @@ -142,12 +132,20 @@ tiers: installer: '(fuel)|(apex)' scenario: 'bgpvpn' + - + name: security_scan + description: >- + Simple security Scan + dependencies: + installer: 'apex' + scenario: '' + - - name: tempest + name: openstack order: 4 ci_loop: 'weekly' description : >- - This test case runs the full set of the OpenStack Tempest suite. + Extensive testing of OpenStack API. testcases: - name: tempest_full_parallel @@ -159,13 +157,6 @@ tiers: installer: '' scenario: '' - - - name: rally - order: 5 - ci_loop: 'weekly' - description : >- - Rally suite from the OpenStack community. - testcases: - name: rally_full description: >- @@ -177,7 +168,7 @@ tiers: - name: vnf - order: 6 + order: 5 ci_loop: 'weekly' description : >- Collection of VNF test cases. diff --git a/ci/tier_handler.py b/ci/tier_handler.py index 03db4a91d..b1ef52d8f 100644 --- a/ci/tier_handler.py +++ b/ci/tier_handler.py @@ -109,10 +109,15 @@ class TestCase: def is_compatible(self, ci_installer, ci_scenario): try: - return not (re.search(self.dependency.get_installer(), - ci_installer) is None or - re.search(self.dependency.get_scenario(), - ci_scenario) is None) + if ci_installer is not None: + if re.search(self.dependency.get_installer(), + ci_installer) is None: + return False + if ci_scenario is not None: + if re.search(self.dependency.get_scenario(), + ci_scenario) is None: + return False + return not (ci_scenario is None and ci_installer is None) except TypeError: return False diff --git a/docs/userguide/troubleshooting.rst b/docs/userguide/troubleshooting.rst index 406aaad2a..99b92977b 100644 --- a/docs/userguide/troubleshooting.rst +++ b/docs/userguide/troubleshooting.rst @@ -89,7 +89,7 @@ is not **ACTIVE**), you can check why it failed by doing:: It might show some messages about the booting failure. 
To try that manually:: - nova boot --flavor 2 --image functest-vping --nic net-id=<NET_ID> nova-test + nova boot --flavor m1.small --image functest-vping --nic net-id=<NET_ID> nova-test This will spawn a VM using the network created previously manually. In all the OPNFV tested scenarios from CI, it never has been a problem with the diff --git a/testcases/Controllers/ODL/odlreport2db.py b/testcases/Controllers/ODL/odlreport2db.py index 50c8b096e..8eb78b19a 100644 --- a/testcases/Controllers/ODL/odlreport2db.py +++ b/testcases/Controllers/ODL/odlreport2db.py @@ -33,9 +33,9 @@ import functest.utils.functest_utils as functest_utils def usage(): print """Usage: - get-json-from-robot.py --xml=<output.xml> --pod=<pod_name> - --installer=<installer> --database=<Database URL> - --scenaro=SCENARIO + python odlreport2db.py --xml=<output.xml> --pod=<pod name> + --installer=<installer> --database=<database url> + --scenario=<scenario> -x, --xml xml file generated by robot test -p, --pod POD name where the test come from -i, --installer @@ -76,6 +76,7 @@ def parse_suites(suites): def main(argv): + (xml_file, pod, installer, scenario) = None, None, None, None try: opts, args = getopt.getopt(argv, 'x:p:i:s:h', @@ -100,6 +101,9 @@ def main(argv): else: usage() + if not all(x is not None for x in (xml_file, pod, installer, scenario)): + usage() + with open(xml_file, "r") as myfile: xml_input = myfile.read().replace('\n', '') diff --git a/testcases/Controllers/ONOS/Teston/adapters/environment.py b/testcases/Controllers/ONOS/Teston/adapters/environment.py index 49f7f9632..69c4c010c 100644 --- a/testcases/Controllers/ONOS/Teston/adapters/environment.py +++ b/testcases/Controllers/ONOS/Teston/adapters/environment.py @@ -136,7 +136,7 @@ class environment(connection): print "try to connect " + str(host) result = self.CheckSshNoPasswd(host) if not result: - print ("ssh lgin failed,try to copy master publickey" + + print ("ssh login failed,try to copy master publickey" + "to agent " + str(host)) self.CopyPublicKey(host) self.OnosPushKeys(handle, "onos-push-keys " + self.OCT, masterpass) diff --git a/testcases/OpenStack/healthcheck/healthcheck.sh b/testcases/OpenStack/healthcheck/healthcheck.sh index 611c100c5..d9a83a2af 100755 --- a/testcases/OpenStack/healthcheck/healthcheck.sh +++ b/testcases/OpenStack/healthcheck/healthcheck.sh @@ -182,13 +182,18 @@ info "...Neutron OK!" info "Testing Nova API..." ################################# -nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_1} +# This delay should be removed after resolving Jira case APEX-149. +# The purpose is to give some time to populate openflow rules +# by SDN controller in case of odl_l2 scenario. +sleep 60 + +nova boot --flavor m1.small --image ${image_1} --nic net-id=${net1_id} ${instance_1} debug "nova instance '${instance_1}' booted on ${net_1}." -nova boot --flavor 2 --image ${image_1} --nic net-id=${net1_id} ${instance_2} +nova boot --flavor m1.small --image ${image_1} --nic net-id=${net1_id} ${instance_2} debug "nova instance '${instance_2}' booted on ${net_1}." -nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_3} +nova boot --flavor m1.small --image ${image_2} --nic net-id=${net2_id} ${instance_3} debug "nova instance '${instance_3}' booted on ${net_2}." -nova boot --flavor 2 --image ${image_2} --nic net-id=${net2_id} ${instance_4} +nova boot --flavor m1.small --image ${image_2} --nic net-id=${net2_id} ${instance_4} debug "nova instance '${instance_4}' booted on ${net_2}." 
vm1_id=$(nova list | grep ${instance_1} | awk '{print $2}') diff --git a/testcases/OpenStack/tempest/run_tempest.py b/testcases/OpenStack/tempest/run_tempest.py index bf62ce306..d8a8a1acb 100644 --- a/testcases/OpenStack/tempest/run_tempest.py +++ b/testcases/OpenStack/tempest/run_tempest.py @@ -254,31 +254,28 @@ def run_tempest(OPTION): # logger.info("Starting Tempest test suite: '%s'." % OPTION) cmd_line = "rally verify start " + OPTION + " --system-wide" - CI_DEBUG = os.environ.get("CI_DEBUG") - if CI_DEBUG == "true" or CI_DEBUG == "True": - ft_utils.execute_command(cmd_line, logger, exit_on_error=True) - else: - header = ("Tempest environment:\n" - " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % - (os.getenv('INSTALLER_TYPE', 'Unknown'), - os.getenv('DEPLOY_SCENARIO', 'Unknown'), - os.getenv('NODE_NAME', 'Unknown'), - time.strftime("%a %b %d %H:%M:%S %Z %Y"))) - f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') - f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') - f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') - f_env.write(header) + header = ("Tempest environment:\n" + " Installer: %s\n Scenario: %s\n Node: %s\n Date: %s\n" % + (os.getenv('INSTALLER_TYPE', 'Unknown'), + os.getenv('DEPLOY_SCENARIO', 'Unknown'), + os.getenv('NODE_NAME', 'Unknown'), + time.strftime("%a %b %d %H:%M:%S %Z %Y"))) + + f_stdout = open(TEMPEST_RESULTS_DIR + "/tempest.log", 'w+') + f_stderr = open(TEMPEST_RESULTS_DIR + "/tempest-error.log", 'w+') + f_env = open(TEMPEST_RESULTS_DIR + "/environment.log", 'w+') + f_env.write(header) - subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr) + subprocess.call(cmd_line, shell=True, stdout=f_stdout, stderr=f_stderr) - f_stdout.close() - f_stderr.close() - f_env.close() + f_stdout.close() + f_stderr.close() + f_env.close() - cmd_line = "rally verify show" - ft_utils.execute_command(cmd_line, logger, - exit_on_error=True, info=True) + cmd_line = "rally verify show" + ft_utils.execute_command(cmd_line, logger, + exit_on_error=True, info=True) cmd_line = "rally verify list" logger.debug('Executing command : {}'.format(cmd_line)) @@ -297,24 +294,35 @@ def run_tempest(OPTION): dur_sec_int = int(round(dur_sec_float, 0)) dur_sec_int = dur_sec_int + 60 * dur_min - # Generate json results for DB - json_results = {"timestart": time_start, "duration": dur_sec_int, - "tests": int(num_tests), "failures": int(num_failures)} - logger.info("Results: " + str(json_results)) - - status = "failed" - try: - diff = (int(num_tests) - int(num_failures)) - success_rate = 100 * diff / int(num_tests) - except: - success_rate = 0 - - # For Tempest we assume that teh success rate is above 90% - if success_rate >= 90: - status = "passed" - # Push results in payload of testcase if args.report: + # Note criteria hardcoded...TODO move to testcase.yaml + status = "failed" + try: + diff = (int(num_tests) - int(num_failures)) + success_rate = 100 * diff / int(num_tests) + except: + success_rate = 0 + + # For Tempest we assume that the success rate is above 90% + if success_rate >= 90: + status = "passed" + + # add the test in error in the details sections + # should be possible to do it during the test + with open(TEMPEST_RESULTS_DIR + "/tempest.log", 'r') as myfile: + output = myfile.read() + error_logs = "" + + for match in re.findall('(.*?)[. 
]*FAILED', output): + error_logs += match + + # Generate json results for DB + json_results = {"timestart": time_start, "duration": dur_sec_int, + "tests": int(num_tests), "failures": int(num_failures), + "errors": error_logs} + logger.info("Results: " + str(json_results)) + logger.debug("Push result into DB") push_results_to_db("Tempest", json_results, status) diff --git a/testcases/SECTests/OpenSCAP.py b/testcases/SECTests/OpenSCAP.py deleted file mode 100644 index 40d155c1a..000000000 --- a/testcases/SECTests/OpenSCAP.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2016 Red Hat -# Luke Hinds (lhinds@redhat.com) -# This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1: This script installs OpenSCAP on the remote host, and scans the -# nominated node. Post scan a report is downloaded and if '--clean' is passed -# all trace of the scan is removed from the remote system. - -import os -import datetime -import argparse - -__version__ = 0.1 -__author__ = 'Luke Hinds (lhinds@redhat.com)' -__url__ = 'https://wiki.opnfv.org/display/functest/Functest+Security' - -''' -Example Run: - python ./OpenSCAP.py --host 192.168.0.24 --port 22 --user root --password - p6ssw0rd oval --secpolicy - /usr/share/xml/scap/ssg/content/ssg-rhel7-oval.xml --report report.html - --results results.xml - -''' - -# Variables needed.. -pwd = os.getcwd() -oscap = '/bin/oscap' -currenttime = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') - -# Set up the main parser -parser = argparse.ArgumentParser(description='OpenSCAP Python Scanner') - -# Main args -# Todo add required = True -parser.add_argument('--user', - action='store', - dest='user', - help='user') -parser.add_argument('--password', - action='store', - dest='password', - help='Password') -parser.add_argument('--host', - action='store', - dest='host', - help='host', - required=True) -parser.add_argument('--port', - action='store', - dest='port"', - help='port', - required=True) -parser.add_argument('--dist', - action='store', - dest='dist', - help='Distribution') -parser.add_argument('--clean', - action='store_true', - dest='clean', - help='Clean all files from host') - -# And the subparser -subparsers = parser.add_subparsers( - title='subcommands', - description='valid subcommands', - help='additional help') - - -parser_xccdf = subparsers.add_parser('xccdf') -parser_xccdf.set_defaults(which='xccdf') - -parser_oval = subparsers.add_parser('oval') -parser_oval.set_defaults(which='oval') - -parser_oval_collect = subparsers.add_parser('oval-collect') -parser_oval_collect.set_defaults(which='oval-collect') - -parser_xccdf.add_argument( - '--profile', - action='store', - dest='profile', - help='xccdf profile') - -parser_oval.add_argument( - '--results', - action='store', - dest='results', - help='Report name (inc extension (.html)') - -parser_oval.add_argument( - '--report', - action='store', - dest='report', - help='Report name (inc extension (.html)') - -parser_oval.add_argument( - '--secpolicy', - action='store', - dest='secpolicy', - help='Security Policy') - -parserout = parser.parse_args() -args = vars(parser.parse_args()) - - -def createfiles(): - import connect - global tmpdir - localpath = os.getcwd() + '/scripts/createfiles.py' - remotepath = '/tmp/createfiles.py' - com = 'python /tmp/createfiles.py' - connect = 
connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - localpath, - remotepath, - com) - tmpdir = connect.remotescript() - - -def install_pkg(): - import connect - com = 'yum -y install openscap-scanner scap-security-guide' - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - com) - install_pkg = connect.remotecmd() - print install_pkg - - -def run_scanner(): - import connect - - if args['which'] == 'xccdf': - print 'xccdf' - com = '{0} xccdf eval'.format(oscap) - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - com) - elif args['which'] == 'oval': - com = ('{0} oval eval --results {1}/{2}' + - ' --report {1}/{3} {4}'.format(oscap, - tmpdir.rstrip(), - parserout.results, - parserout.report, - parserout.secpolicy)) - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - com) - run_tool = connect.remotecmd() - else: - com = '{0} oval-collect '.format(oscap) - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - com) - run_tool = connect.remotecmd() - print run_tool - - -def post_tasks(): - import connect - dl_folder = os.path.join(os.getcwd(), parserout.host + - datetime.datetime.now(). - strftime('%Y-%m-%d_%H-%M-%S')) - os.mkdir(dl_folder, 0755) - reportfile = '{0}/{1}'.format(tmpdir.rstrip(), parserout.report) - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - dl_folder, - reportfile, - parserout.report, - parserout.results) - run_tool = connect.download_reports() - print run_tool - - -def removepkg(): - import connect - com = 'yum -y remove openscap-scanner scap-security-guide' - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - com) - yumremove = connect.remotecmd() - print yumremove - - -def cleandir(): - import connect - com = 'rm -r {0}'.format(tmpdir.rstrip()) - connect = connect.connectionManager(parserout.host, - parserout.user, - parserout.password, - com) - deldir = connect.remotecmd() - print deldir - - -if __name__ == '__main__': - print 'Creating temp file structure...\n' - createfiles() - print 'Install OpenSCAP scanner...\n' - install_pkg() - print 'Running scan...\n' - run_scanner() - print 'Post installation tasks...\n' - post_tasks() - if parserout.clean: - print 'Cleaning down environment...\n' - print 'Removing OpenSCAP...\n' - removepkg() - print 'Deleting tmp file and reports (remote)...\n' - cleandir() diff --git a/testcases/SECTests/connect.py b/testcases/SECTests/connect.py deleted file mode 100644 index f766eabf9..000000000 --- a/testcases/SECTests/connect.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2016 Red Hat -# Luke Hinds (lhinds@redhat.com) -# This program and the accompanying materials -# are made available under the terms of the Apache License, Version 2.0 -# which accompanies this distribution, and is available at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# 0.1: OpenSCAP paramiko connection functions - -import paramiko - -__version__ = 0.1 -__author__ = 'Luke Hinds (lhinds@redhat.com)' -__url__ = 'http:/https://wiki.opnfv.org/display/security' - - -class connectionManager: - def __init__(self, hostname, user, password, *args): - self.hostname = hostname - self.user = user - self.password = password - self.args = args - - def remotescript(self): - localpath = self.args[0] - remotepath = self.args[1] - com = self.args[2] - - client = 
paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect(self.hostname, 22, self.user, self.password) - - sftp = client.open_sftp() - sftp.put(localpath, remotepath) - - output = "" - stdin, stdout, stderr = client.exec_command(com) - stdout = stdout.readlines() - sftp.remove(remotepath) - client.close() - - # Spool it back (can be improved at later point) - for line in stdout: - output = output + line - if output != "": - return output - - def remotecmd(self): - com = self.args[0] - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect(self.hostname, 22, self.user, self.password) - output = "" - stdin, stdout, stderr = client.exec_command(com) - stdout = stdout.readlines() - client.close() - - # Spool it back (can be improved at later point) - for line in stdout: - output = output + line - if output != "": - return output - else: - print "There was no output for this command" - - def run_tool(self): - # dist = self.args[0] - # report = self.args[1] - com = self.args[2] - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect(self.hostname, 22, self.user, self.password) - - output = "" - stdin, stdout, stderr = client.exec_command(com) - stdout = stdout.readlines() - client.close() - - # Spool it back (can be improved at later point) - for line in stdout: - output = output + line - if output != "": - return output - - def download_reports(self): - dl_folder = self.args[0] - reportfile = self.args[1] - reportname = self.args[2] - resultsname = self.args[3] - transport = paramiko.Transport((self.hostname, 22)) - transport.connect(username=self.user, password=self.password) - sftp = paramiko.SFTPClient.from_transport(transport) - - # Download the reportfile (html) and the results (xml) - print 'Downloading \"{0}\"...'.format(reportname) - sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, reportname))) - print 'Downloading \"{0}\"...'.format(resultsname) - sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, resultsname))) - sftp.close() - transport.close() diff --git a/testcases/features/bgpvpn.py b/testcases/features/bgpvpn.py index 03aecbb32..e3db9209d 100644 --- a/testcases/features/bgpvpn.py +++ b/testcases/features/bgpvpn.py @@ -94,8 +94,16 @@ def main(): logger.info("Results: " + str(json_results)) criteria = "failed" - if int(tests) > 0 and int(failed) < 1: + # criteria = success rate = 100% (i.e all tests passed) + criteria_run = int(tests) + if not failed: + criteria_failed = 0 + else: + criteria_failed = int(failed) + + if criteria_run > 0 and criteria_failed < 1: criteria = "passed" + # Push results in payload of testcase if args.report: logger.debug("Push result into DB") diff --git a/testcases/security_scan/config.ini b/testcases/security_scan/config.ini new file mode 100644 index 000000000..440b23cbe --- /dev/null +++ b/testcases/security_scan/config.ini @@ -0,0 +1,25 @@ +[controller] +port = 22 +user = heat-admin +user_key = /root/.ssh/stackkey +scantype = xccdf +secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml +cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml +profile = standard +report = report.hmtl +results = results.xml +reports_dir=/home/opnfv/functest/results/security_scan/ +clean = True + +[compute] +port = 22 +user = heat-admin +user_key = /root/.ssh/stackkey +scantype = xccdf +secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml +cpe = 
/usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml +profile = standard +report = report.hmtl +results = results.xml +reports_dir=/home/opnfv/functest/results/security_scan/ +clean = True diff --git a/testcases/security_scan/connect.py b/testcases/security_scan/connect.py new file mode 100644 index 000000000..9430e9a74 --- /dev/null +++ b/testcases/security_scan/connect.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Red Hat +# Luke Hinds (lhinds@redhat.com) +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# 0.1: OpenSCAP paramiko connection functions + +import os +import socket +import paramiko + +import functest.utils.functest_logger as ft_logger + +# Enable below for connection debugging +# paramiko.util.log_to_file('ssh.log') + +# add installer IP from env +INSTALLER_IP = os.getenv('INSTALLER_IP') + +# Set up logger +logger = ft_logger.Logger("security_scan").getLogger() + + +class novaManager: + def __init__(self, *args): + self.args = args + + def keystonepass(self): + com = self.args[0] + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + try: + client.connect(INSTALLER_IP, port=22, username='stack') + except paramiko.SSHException: + logger.error("Password is invalid for " + "undercloud host: {0}").format(INSTALLER_IP) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "undercloud host: {0}").format(INSTALLER_IP) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(INSTALLER_IP) + stdin, stdout, stderr = client.exec_command(com) + return stdout.read() + + +class connectionManager: + def __init__(self, host, port, user, user_key, *args): + self.host = host + self.port = port + self.user = user + self.user_key = user_key + self.args = args + + def remotescript(self): + localpath = self.args[0] + remotepath = self.args[1] + com = self.args[2] + + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + # Connection to undercloud + try: + client.connect(INSTALLER_IP, port=22, username='stack') + except paramiko.SSHException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(self.host) + + transport = client.get_transport() + local_addr = ('127.0.0.1', 0) + channel = transport.open_channel("direct-tcpip", + (self.host, int(self.port)), + (local_addr)) + remote_client = paramiko.SSHClient() + remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + # Tunnel to overcloud + try: + remote_client.connect('127.0.0.1', port=22, username=self.user, + key_filename=self.user_key, sock=channel) + sftp = remote_client.open_sftp() + sftp.put(localpath, remotepath) + except paramiko.SSHException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(self.host) + + output = "" + stdin, stdout, stderr = 
remote_client.exec_command(com) + stdout = stdout.readlines() + # remove script + sftp.remove(remotepath) + remote_client.close() + client.close() + # Pipe back stout + for line in stdout: + output = output + line + if output != "": + return output + + def remotecmd(self): + com = self.args[0] + + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + # Connection to undercloud + try: + client.connect(INSTALLER_IP, port=22, username='stack') + except paramiko.SSHException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(self.host) + + transport = client.get_transport() + local_addr = ('127.0.0.1', 0) # 0 denotes choose random port + channel = transport.open_channel("direct-tcpip", + (self.host, int(self.port)), + (local_addr)) + remote_client = paramiko.SSHClient() + remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + # Tunnel to overcloud + try: + remote_client.connect('127.0.0.1', port=22, username=self.user, + key_filename=self.user_key, sock=channel) + except paramiko.SSHException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(self.host) + + chan = remote_client.get_transport().open_session() + chan.get_pty() + f = chan.makefile() + chan.exec_command(com) + print f.read() + + remote_client.close() + client.close() + + def download_reports(self): + dl_folder = self.args[0] + reportfile = self.args[1] + reportname = self.args[2] + resultsname = self.args[3] + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + # Connection to overcloud + try: + client.connect(INSTALLER_IP, port=22, username='stack') + except paramiko.SSHException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(self.host) + + transport = client.get_transport() + local_addr = ('127.0.0.1', 0) # 0 denotes choose random port + channel = transport.open_channel("direct-tcpip", + (self.host, int(self.port)), + (local_addr)) + remote_client = paramiko.SSHClient() + remote_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + # Tunnel to overcloud + try: + remote_client.connect('127.0.0.1', port=22, username=self.user, + key_filename=self.user_key, sock=channel) + except paramiko.SSHException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except paramiko.AuthenticationException: + logger.error("Authentication failed for " + "host: {0}").format(self.host) + except socket.error: + logger.error("Socker Connection failed for " + "undercloud host: {0}").format(self.host) + # Download the reports + sftp = remote_client.open_sftp() + logger.info('Downloading \"{0}\"...\n').format(reportname) + sftp.get(reportfile, ('{0}/{1}'.format(dl_folder, reportname))) + logger.info('Downloading \"{0}\"...\n').format(resultsname) + sftp.get(reportfile, 
('{0}/{1}'.format(dl_folder, resultsname))) + sftp.close() + transport.close() diff --git a/testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini b/testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini new file mode 100644 index 000000000..cd5d4d27b --- /dev/null +++ b/testcases/security_scan/examples/xccdf-rhel7-server-upstream.ini @@ -0,0 +1,25 @@ +[controller] +port = 22 +user = heat-admin +user_key = /root/.ssh/stackkey +scantype = xccdf +secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml +cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml +profile = stig-rhel7-server-upstream +report = report.hmtl +results = results.xml +reports_dir=/home/opnfv/functest/results/security_scan/ +clean = True + +[compute] +port = 22 +user = heat-admin +user_key = /root/.ssh/stackkey +scantype = xccdf +secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml +cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml +profile = stig-rhel7-server-upstream +report = report.hmtl +results = results.xml +reports_dir=/home/opnfv/functest/results/security_scan/ +clean = True diff --git a/testcases/security_scan/examples/xccdf-standard.ini b/testcases/security_scan/examples/xccdf-standard.ini new file mode 100644 index 000000000..440b23cbe --- /dev/null +++ b/testcases/security_scan/examples/xccdf-standard.ini @@ -0,0 +1,25 @@ +[controller] +port = 22 +user = heat-admin +user_key = /root/.ssh/stackkey +scantype = xccdf +secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml +cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml +profile = standard +report = report.hmtl +results = results.xml +reports_dir=/home/opnfv/functest/results/security_scan/ +clean = True + +[compute] +port = 22 +user = heat-admin +user_key = /root/.ssh/stackkey +scantype = xccdf +secpolicy = /usr/share/xml/scap/ssg/content/ssg-centos7-xccdf.xml +cpe = /usr/share/xml/scap/ssg/content/ssg-rhel7-cpe-dictionary.xml +profile = standard +report = report.hmtl +results = results.xml +reports_dir=/home/opnfv/functest/results/security_scan/ +clean = True diff --git a/testcases/SECTests/scripts/createfiles.py b/testcases/security_scan/scripts/createfiles.py index b828901a5..b828901a5 100644 --- a/testcases/SECTests/scripts/createfiles.py +++ b/testcases/security_scan/scripts/createfiles.py diff --git a/testcases/security_scan/security_scan.py b/testcases/security_scan/security_scan.py new file mode 100644 index 000000000..f7f02cd9c --- /dev/null +++ b/testcases/security_scan/security_scan.py @@ -0,0 +1,178 @@ +#!/usr/bin/python +# +# Copyright (c) 2016 Red Hat +# Luke Hinds (lhinds@redhat.com) +# This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# 0.1: This script installs OpenSCAP on the remote host, and scans the +# nominated node. Post scan a report is downloaded and if '--clean' is passed +# all trace of the scan is removed from the remote system. 
+ +import argparse +import connect +import datetime +import os + +from ConfigParser import SafeConfigParser +from keystoneclient.auth.identity import v2 +from keystoneclient import session +from novaclient import client + +__version__ = 0.1 +__author__ = 'Luke Hinds (lhinds@redhat.com)' +__url__ = 'https://wiki.opnfv.org/display/functest/Functest+Security' + +# Global vars +INSTALLER_IP = os.getenv('INSTALLER_IP') +oscapbin = 'sudo /bin/oscap' + +# Configure Nova Credentials +com = 'sudo hiera admin_password' +connect = connect.novaManager(com) +keypass = connect.keystonepass() +auth = v2.Password(auth_url='http://{0}:5000/v2.0'.format(INSTALLER_IP), + username='admin', + password=str(keypass).rstrip(), + tenant_name='admin') +sess = session.Session(auth=auth) +nova = client.Client(2, session=sess) + + +# args +parser = argparse.ArgumentParser(description='OPNFV OpenSCAP Scanner') +parser.add_argument('--config', action='store', dest='cfgfile', + help='Config file', required=True) +args = parser.parse_args() + +# functest logger +logger = ft_logger.Logger("security_scan").getLogger() + +# Config Parser +cfgparse = SafeConfigParser() +cfgparse.read(args.cfgfile) + + +def run_tests(host, nodetype): + port = cfgparse.get(nodetype, 'port') + user = cfgparse.get(nodetype, 'user') + user_key = cfgparse.get(nodetype, 'user_key') + logger.info("Host: {0} Selected Profile: {1}").format(host, nodetype) + logger.info("Creating temp file structure..") + createfiles(host, port, user, user_key) + logger.info("Installing OpenSCAP...") + install_pkg(host, port, user, user_key) + logger.info("Running scan...") + run_scanner(host, port, user, user_key, nodetype) + clean = cfgparse.get(nodetype, 'clean') + logger.info("Post installation tasks....") + post_tasks(host, port, user, user_key, nodetype) + if clean: + logger.info("Cleaning down environment....") + logger.info("Removing OpenSCAP....") + removepkg(host, port, user, user_key, nodetype) + logger.info("Deleting tmp file and reports (remote)...") + cleandir(host, port, user, user_key, nodetype) + + +def nova_iterate(): + # Find compute nodes, active with network on ctlplane + for server in nova.servers.list(): + if server.status == 'ACTIVE' and 'compute' in server.name: + networks = server.networks + nodetype = 'compute' + for host in networks['ctlplane']: + run_tests(host, nodetype) + # Find controller nodes, active with network on ctlplane + elif server.status == 'ACTIVE' and 'controller' in server.name: + networks = server.networks + nodetype = 'controller' + for host in networks['ctlplane']: + run_tests(host, nodetype) + + +def createfiles(host, port, user, user_key): + import connect + global tmpdir + localpath = os.getcwd() + '/scripts/createfiles.py' + remotepath = '/tmp/createfiles.py' + com = 'python /tmp/createfiles.py' + connect = connect.connectionManager(host, port, user, user_key, + localpath, remotepath, com) + tmpdir = connect.remotescript() + + +def install_pkg(host, port, user, user_key): + import connect + com = 'sudo yum -y install openscap-scanner scap-security-guide' + connect = connect.connectionManager(host, port, user, user_key, com) + connect.remotecmd() + + +def run_scanner(host, port, user, user_key, nodetype): + import connect + scantype = cfgparse.get(nodetype, 'scantype') + profile = cfgparse.get(nodetype, 'profile') + results = cfgparse.get(nodetype, 'results') + report = cfgparse.get(nodetype, 'report') + secpolicy = cfgparse.get(nodetype, 'secpolicy') + # Here is where we contruct the actual scan command + if scantype 
== 'xccdf': + cpe = cfgparse.get(nodetype, 'cpe') + com = '{0} xccdf eval --profile {1} --results {2}/{3}' \ + ' --report {2}/{4} --cpe {5} {6}'.format(oscapbin, + profile, + tmpdir.rstrip(), + results, + report, + cpe, + secpolicy) + connect = connect.connectionManager(host, port, user, user_key, com) + connect.remotecmd() + elif scantype == 'oval': + com = '{0} oval eval --results {1}/{2} ' + '--report {1}/{3} {4}'.format(oscapbin, tmpdir.rstrip(), + results, report, secpolicy) + connect = connect.connectionManager(host, port, user, user_key, com) + connect.remotecmd() + else: + com = '{0} oval-collect '.format(oscapbin) + connect = connect.connectionManager(host, port, user, user_key, com) + connect.remotecmd() + + +def post_tasks(host, port, user, user_key, nodetype): + import connect + # Create the download folder for functest dashboard and download reports + reports_dir = cfgparse.get(nodetype, 'reports_dir') + dl_folder = os.path.join(reports_dir, host + "_" + + datetime.datetime. + now().strftime('%Y-%m-%d_%H-%M-%S')) + os.makesdir(dl_folder, 0755) + report = cfgparse.get(nodetype, 'report') + results = cfgparse.get(nodetype, 'results') + reportfile = '{0}/{1}'.format(tmpdir.rstrip(), report) + connect = connect.connectionManager(host, port, user, user_key, dl_folder, + reportfile, report, results) + connect.download_reports() + + +def removepkg(host, port, user, user_key, nodetype): + import connect + com = 'sudo yum -y remove openscap-scanner scap-security-guide' + connect = connect.connectionManager(host, port, user, user_key, com) + connect.remotecmd() + + +def cleandir(host, port, user, user_key, nodetype): + import connect + com = 'sudo rm -r {0}'.format(tmpdir.rstrip()) + connect = connect.connectionManager(host, port, user, user_key, com) + connect.remotecmd() + + +if __name__ == '__main__': + nova_iterate() diff --git a/utils/functest_utils.py b/utils/functest_utils.py index 8ee5346c1..23dea7e5a 100644 --- a/utils/functest_utils.py +++ b/utils/functest_utils.py @@ -165,7 +165,7 @@ def push_results_to_db(db_url, project, case_name, logger, pod_name, print ("Error [push_results_to_db('%s', '%s', '%s', " + "'%s', '%s', '%s', '%s', '%s', '%s')]:" % (db_url, project, case_name, pod_name, version, - scenario, criteria, build_tag, payload), e) + scenario, criteria, build_tag, payload)), e return False diff --git a/utils/openstack_utils.py b/utils/openstack_utils.py index fc89fd2b8..4084e1fc2 100644 --- a/utils/openstack_utils.py +++ b/utils/openstack_utils.py @@ -101,7 +101,7 @@ def get_instance_status(nova_client, instance): return instance.status except: # print ("Error [get_instance_status(nova_client, '%s')]:" % - # str(instance), e) + # str(instance)), e return None @@ -111,7 +111,7 @@ def get_instance_by_name(nova_client, instance_name): return instance except Exception, e: print ("Error [get_instance_by_name(nova_client, '%s')]:" % - instance_name, e) + instance_name), e return None @@ -149,7 +149,7 @@ def create_flavor(nova_client, flavor_name, ram, disk, vcpus): flavor = nova_client.flavors.create(flavor_name, ram, vcpus, disk) except Exception, e: print ("Error [create_flavor(nova_client, '%s', '%s', '%s', " - "'%s')]:" % (flavor_name, ram, disk, vcpus), e) + "'%s')]:" % (flavor_name, ram, disk, vcpus)), e return None return flavor.id @@ -173,7 +173,7 @@ def add_floating_ip(nova_client, server_id, floatingip_id): return True except Exception, e: print ("Error [add_floating_ip(nova_client, '%s', '%s')]:" % - (server_id, floatingip_id), e) + (server_id, floatingip_id)), e 
return False @@ -192,7 +192,7 @@ def delete_floating_ip(nova_client, floatingip_id): return True except Exception, e: print ("Error [delete_floating_ip(nova_client, '%s')]:" % - floatingip_id, e) + floatingip_id), e return False @@ -306,7 +306,7 @@ def create_neutron_subnet(neutron_client, name, cidr, net_id): return subnet['subnets'][0]['id'] except Exception, e: print ("Error [create_neutron_subnet(neutron_client, '%s', '%s', " - "'%s')]:" % (name, cidr, net_id), e) + "'%s')]:" % (name, cidr, net_id)), e return False @@ -332,7 +332,7 @@ def create_neutron_port(neutron_client, name, network_id, ip): return port['port']['id'] except Exception, e: print ("Error [create_neutron_port(neutron_client, '%s', '%s', " - "'%s')]:" % (name, network_id, ip), e) + "'%s')]:" % (name, network_id, ip)), e return False @@ -343,7 +343,7 @@ def update_neutron_net(neutron_client, network_id, shared=False): return True except Exception, e: print ("Error [update_neutron_net(neutron_client, '%s', '%s')]:" % - (network_id, str(shared)), e) + (network_id, str(shared))), e return False @@ -357,7 +357,7 @@ def update_neutron_port(neutron_client, port_id, device_owner): return port['port']['id'] except Exception, e: print ("Error [update_neutron_port(neutron_client, '%s', '%s')]:" % - (port_id, device_owner), e) + (port_id, device_owner)), e return False @@ -368,7 +368,7 @@ def add_interface_router(neutron_client, router_id, subnet_id): return True except Exception, e: print ("Error [add_interface_router(neutron_client, '%s', '%s')]:" % - (router_id, subnet_id), e) + (router_id, subnet_id)), e return False @@ -380,7 +380,7 @@ def add_gateway_router(neutron_client, router_id): return True except Exception, e: print ("Error [add_gateway_router(neutron_client, '%s')]:" % - router_id, e) + router_id), e return False @@ -390,7 +390,7 @@ def delete_neutron_net(neutron_client, network_id): return True except Exception, e: print ("Error [delete_neutron_net(neutron_client, '%s')]:" % - network_id, e) + network_id), e return False @@ -400,7 +400,7 @@ def delete_neutron_subnet(neutron_client, subnet_id): return True except Exception, e: print ("Error [delete_neutron_subnet(neutron_client, '%s')]:" % - subnet_id, e) + subnet_id), e return False @@ -410,7 +410,7 @@ def delete_neutron_router(neutron_client, router_id): return True except Exception, e: print ("Error [delete_neutron_router(neutron_client, '%s')]:" % - router_id, e) + router_id), e return False @@ -431,7 +431,7 @@ def remove_interface_router(neutron_client, router_id, subnet_id): return True except Exception, e: print ("Error [remove_interface_router(neutron_client, '%s', '%s')]:" % - (router_id, subnet_id), e) + (router_id, subnet_id)), e return False @@ -441,7 +441,7 @@ def remove_gateway_router(neutron_client, router_id): return True except Exception, e: print ("Error [remove_gateway_router(neutron_client, '%s')]:" % - router_id, e) + router_id), e return False @@ -532,7 +532,7 @@ def create_security_group(neutron_client, sg_name, sg_description): return secgroup['security_group'] except Exception, e: print ("Error [create_security_group(neutron_client, '%s', '%s')]:" % - (sg_name, sg_description), e) + (sg_name, sg_description)), e return False @@ -562,7 +562,7 @@ def create_secgroup_rule(neutron_client, sg_id, direction, protocol, print ("Error [create_secgroup_rule(neutron_client, '%s', '%s', " "'%s', '%s', '%s', '%s')]:" % (neutron_client, sg_id, direction, port_range_min, port_range_max, - protocol), e) + protocol)), e return False @@ -572,7 +572,7 @@ def 
add_secgroup_to_instance(nova_client, instance_id, secgroup_id): return True except Exception, e: print ("Error [add_secgroup_to_instance(nova_client, '%s', '%s')]: " % - (instance_id, secgroup_id), e) + (instance_id, secgroup_id)), e return False @@ -588,7 +588,7 @@ def update_sg_quota(neutron_client, tenant_id, sg_quota, sg_rule_quota): return True except Exception, e: print ("Error [update_sg_quota(neutron_client, '%s', '%s', " - "'%s')]:" % (tenant_id, sg_quota, sg_rule_quota), e) + "'%s')]:" % (tenant_id, sg_quota, sg_rule_quota)), e return False @@ -598,7 +598,7 @@ def delete_security_group(neutron_client, secgroup_id): return True except Exception, e: print ("Error [delete_security_group(neutron_client, '%s')]:" % - secgroup_id, e) + secgroup_id), e return False @@ -638,7 +638,7 @@ def create_glance_image(glance_client, image_name, file_path, public=True): return image.id except Exception, e: print ("Error [create_glance_image(glance_client, '%s', '%s', " - "'%s')]:" % (image_name, file_path, str(public)), e) + "'%s')]:" % (image_name, file_path, str(public))), e return False @@ -647,7 +647,7 @@ def delete_glance_image(nova_client, image_id): nova_client.images.delete(image_id) return True except Exception, e: - print ("Error [delete_glance_image(nova_client, '%s')]:" % image_id, e) + print ("Error [delete_glance_image(nova_client, '%s')]:" % image_id), e return False @@ -697,7 +697,7 @@ def update_cinder_quota(cinder_client, tenant_id, vols_quota, except Exception, e: print ("Error [update_cinder_quota(cinder_client, '%s', '%s', '%s'" "'%s')]:" % (tenant_id, vols_quota, - snapshots_quota, gigabytes_quota), e) + snapshots_quota, gigabytes_quota)), e return False @@ -714,7 +714,7 @@ def delete_volume(cinder_client, volume_id, forced=False): return True except Exception, e: print ("Error [delete_volume(cinder_client, '%s', '%s')]:" % - (volume_id, str(forced)), e) + (volume_id, str(forced))), e return False @@ -724,7 +724,7 @@ def delete_volume_type(cinder_client, volume_type): return True except Exception, e: print ("Error [delete_volume_type(cinder_client, '%s')]:" % - volume_type, e) + volume_type), e return False @@ -787,7 +787,7 @@ def create_tenant(keystone_client, tenant_name, tenant_description): return tenant.id except Exception, e: print ("Error [create_tenant(cinder_client, '%s', '%s')]:" % - (tenant_name, tenant_description), e) + (tenant_name, tenant_description)), e return False @@ -811,7 +811,7 @@ def add_role_user(keystone_client, user_id, role_id, tenant_id): return True except Exception, e: print ("Error [add_role_user(keystone_client, '%s', '%s'" - "'%s')]:" % (user_id, role_id, tenant_id), e) + "'%s')]:" % (user_id, role_id, tenant_id)), e return False |
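
The new testcases/security_scan/connect.py never connects to the overcloud nodes directly: it first logs in to the undercloud as 'stack' and then tunnels a second SSH session through a direct-tcpip channel. A minimal sketch of that two-hop paramiko pattern follows; the IP addresses, key path and remote command are illustrative placeholders, not values taken from the patch.

    # Sketch of the undercloud -> overcloud SSH hop used by security_scan/connect.py.
    # Addresses, key path and command are placeholder assumptions.
    import paramiko

    UNDERCLOUD_IP = '192.0.2.1'      # stand-in for the INSTALLER_IP environment variable
    OVERCLOUD_IP = '192.0.2.10'      # stand-in for a node address on the ctlplane network
    OVERCLOUD_USER = 'heat-admin'
    USER_KEY = '/root/.ssh/stackkey'

    # First hop: the undercloud, as the 'stack' user (relies on an already
    # authorised key or a running SSH agent, as in the patch).
    jump = paramiko.SSHClient()
    jump.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    jump.connect(UNDERCLOUD_IP, port=22, username='stack')

    # Open a direct-tcpip channel from the undercloud to the overcloud node.
    channel = jump.get_transport().open_channel(
        'direct-tcpip', (OVERCLOUD_IP, 22), ('127.0.0.1', 0))

    # Second hop: the overcloud node, reached through the channel with the key.
    node = paramiko.SSHClient()
    node.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    node.connect('127.0.0.1', port=22, username=OVERCLOUD_USER,
                 key_filename=USER_KEY, sock=channel)

    stdin, stdout, stderr = node.exec_command('hostname')
    print(stdout.read())

    node.close()
    jump.close()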
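
Most of the utils/openstack_utils.py hunks only move the trailing ", e" outside the parentheses of a Python 2 print statement. The effect, shown with a hypothetical exception object that is not taken from the patch, is that the old form prints the repr of a tuple while the new form prints the message and the exception text separated by a space:

    # Python 2 only: illustrates the ", e" placement fixed throughout openstack_utils.py.
    # The exception object below is a made-up example.
    e = ValueError('flavor not found')

    print ("Error [create_flavor('%s')]:" % 'm1.small', e)
    # old form: print sees a single tuple argument and prints its repr, roughly
    # ("Error [create_flavor('m1.small')]:", ValueError('flavor not found',))

    print ("Error [create_flavor('%s')]:" % 'm1.small'), e
    # new form: two items of one print statement, joined by a space:
    # Error [create_flavor('m1.small')]: flavor not found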