aboutsummaryrefslogtreecommitdiffstats
path: root/baro_tests
diff options
context:
space:
mode:
Diffstat (limited to 'baro_tests')
-rw-r--r--baro_tests/barometer.py2
-rw-r--r--baro_tests/collectd.py46
-rw-r--r--baro_tests/config_server.py243
-rw-r--r--baro_tests/dma.py265
-rw-r--r--baro_tests/tests.py19
5 files changed, 553 insertions, 22 deletions
diff --git a/baro_tests/barometer.py b/baro_tests/barometer.py
index a798f245..f75dbc31 100644
--- a/baro_tests/barometer.py
+++ b/baro_tests/barometer.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python
#
+# Copyright 2017 OPNFV
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
diff --git a/baro_tests/collectd.py b/baro_tests/collectd.py
index 188afab1..c1a05afb 100644
--- a/baro_tests/collectd.py
+++ b/baro_tests/collectd.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
-
+#
+# Copyright 2017 OPNFV
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -23,6 +25,8 @@ import time
import logging
import config_server
import tests
+import dma
+from distutils import version
from opnfv.deployment import factory
AODH_NAME = 'aodh'
@@ -468,9 +472,6 @@ def _exec_testcase(
bridge for bridge in ovs_interfaces
if bridge in ovs_configured_bridges]
plugin_prerequisites = {
- 'intel_rdt': [(
- conf.is_rdt_available(compute_node),
- 'RDT not avaialble on VMs')],
'mcelog': [(
conf.is_mcelog_installed(compute_node, 'mcelog'),
'mcelog must be installed.')],
@@ -563,6 +564,18 @@ def _exec_testcase(
+ 'following prerequisites failed:')
for prerequisite in failed_prerequisites:
logger.error(' * {}'.format(prerequisite))
+ # optional plugin
+ elif "intel_rdt" == name and not conf.is_rdt_available(compute_node):
+ # RDT is optional: it is unavailable on virtual nodes, so log the skip and record the test as passed
+ logger.info("RDT is not available on virtual nodes, skipping test.")
+ res = True
+ print("Results for {}, pre-processing".format(str(test_labels[name])))
+ print(results)
+ _process_result(
+ compute_node.get_id(), out_plugin, test_labels[name],
+ res, results, compute_node.get_name())
+ print("Results for {}, post-processing".format(str(test_labels[name])))
+ print(results)
else:
plugin_interval = conf.get_plugin_interval(compute_node, name)
if out_plugin == 'Gnocchi':
@@ -591,9 +604,13 @@ def _exec_testcase(
'Test works, but will be reported as failure,'
+ 'because of non-critical errors.')
res = False
+ print("Results for {}, pre-processing".format(str(test_labels[name])))
+ print(results)
_process_result(
compute_node.get_id(), out_plugin, test_labels[name],
res, results, compute_node.get_name())
+ print("Results for {}, post-processing".format(str(test_labels[name])))
+ print(results)
def get_results_for_ovs_events(
@@ -636,7 +653,7 @@ def mcelog_install():
for node in nodes:
if node.is_compute():
centos_release = node.run_cmd('uname -r')
- if centos_release not in ('3.10.0-514.26.2.el7.x86_64', '3.10.0-693.17.1.el7.x86_64'):
+ if version.LooseVersion(centos_release) < version.LooseVersion('3.10.0-514.26.2.el7.x86_64'):
logger.info(
'Mcelog will NOT be enabled on node-{}.'
+ ' Unsupported CentOS release found ({}).'.format(
@@ -779,15 +796,15 @@ def main(bt_logger=None):
compute_node_names.append(node_name)
plugins_to_enable = []
error_plugins = []
- gnocchi_running = (
+ gnocchi_running_com = (
gnocchi_running and conf.check_gnocchi_plugin_included(
compute_node))
- aodh_running = (
+ aodh_running_com = (
aodh_running and conf.check_aodh_plugin_included(compute_node))
# logger.info("SNMP enabled on {}" .format(node_name))
- if gnocchi_running:
+ if gnocchi_running_com:
out_plugins[node_id].append("Gnocchi")
- if aodh_running:
+ if aodh_running_com:
out_plugins[node_id].append("AODH")
if snmp_running:
out_plugins[node_id].append("SNMP")
@@ -852,14 +869,19 @@ def main(bt_logger=None):
print_overall_summary(
compute_ids, plugin_labels, aodh_plugin_labels, results, out_plugins)
+ res_overall = 0
for res in results:
- if res[3] is 'False' or 'None':
+ if not res[3]:
logger.error('Some tests have failed or have not been executed')
logger.error('Overall Result is Fail')
- return 1
+ res_overall = 1
else:
pass
- return 0
+
+ _print_label('Testing DMA on compute nodes')
+ res_agent = dma.dma_main(logger, conf, computes)
+
+ return 0 if res_overall == 0 and res_agent == 0 else 1
if __name__ == '__main__':
diff --git a/baro_tests/config_server.py b/baro_tests/config_server.py
index 2a4bc167..a6849f05 100644
--- a/baro_tests/config_server.py
+++ b/baro_tests/config_server.py
@@ -1,16 +1,19 @@
# -*- coding: utf-8 -*-
#
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
"""Classes used by collectd.py"""
@@ -18,6 +21,7 @@ import time
import os.path
import os
import re
+import yaml
from opnfv.deployment import factory
import paramiko
@@ -34,6 +38,8 @@ APEX_IP = os.getenv("INSTALLER_IP").rstrip('\n')
APEX_USER = 'root'
APEX_USER_STACK = 'stack'
APEX_PKEY = '/root/.ssh/id_rsa'
+TEST_VM_IMAGE = 'cirros-0.4.0-x86_64-disk.img'
+TEST_VM_IMAGE_PATH = '/home/opnfv/functest/images/' + TEST_VM_IMAGE
class Node(object):
@@ -300,6 +306,97 @@ class ConfigServer(object):
return False
return aodh_present
+ def is_redis_running(self, compute):
+ """Check whether redis service is running on compute"""
+ compute_name = compute.get_name()
+ nodes = get_apex_nodes()
+ for node in nodes:
+ if compute_name == node.get_dict()['name']:
+ stdout = node.run_cmd('sudo systemctl status docker'
+ '&& sudo docker ps'
+ '| grep barometer-redis')
+ if stdout and 'barometer-redis' in stdout:
+ self.__logger.info(
+ 'Redis is running in node {}'.format(
+ compute_name))
+ return True
+ self.__logger.info(
+ 'Redis is *not* running in node {}'.format(
+ compute_name))
+ return False
+
+ def is_dma_server_running(self, compute):
+ """Check whether DMA server is running on compute"""
+ compute_name = compute.get_name()
+ nodes = get_apex_nodes()
+ for node in nodes:
+ if compute_name == node.get_dict()['name']:
+ stdout = node.run_cmd('sudo systemctl status docker'
+ '&& sudo docker ps'
+ '| grep opnfv/barometer-dma')
+ if stdout and '/server' in stdout:
+ self.__logger.info(
+ 'DMA Server is running in node {}'.format(
+ compute_name))
+ return True
+ self.__logger.info(
+ 'DMA Server is *not* running in node {}'.format(
+ compute_name))
+ return False
+
+ def is_dma_infofetch_running(self, compute):
+ """Check whether DMA infofetch is running on compute"""
+ compute_name = compute.get_name()
+ nodes = get_apex_nodes()
+ for node in nodes:
+ if compute_name == node.get_dict()['name']:
+ stdout = node.run_cmd('sudo systemctl status docker'
+ '&& sudo docker ps'
+ '| grep opnfv/barometer-dma')
+ if stdout and '/infofetch' in stdout:
+ self.__logger.info(
+ 'DMA InfoFetch is running in node {}'.format(
+ compute_name))
+ return True
+ self.__logger.info(
+ 'DMA InfoFetch is *not* running in node {}'.format(
+ compute_name))
+ return False
+
+ def get_dma_config(self, compute):
+ """Get config values of DMA"""
+ compute_name = compute.get_name()
+ nodes = get_apex_nodes()
+ for node in nodes:
+ if compute_name == node.get_dict()['name']:
+ # We use following after functest accept python-toml
+ # stdout = node.run_cmd(
+ # 'cat /etc/barometer-dma/config.toml')
+ # try:
+ # agent_conf = toml.loads(stdout)
+ # except (TypeError, TomlDecodeError) as e:
+ # self.__logger.error(
+ # 'DMA config error: {}'.format(e))
+ # agent_conf = None
+ # finally:
+ # return agent_conf
+ readcmd = (
+ 'egrep "listen_port|amqp_"'
+ ' /etc/barometer-dma/config.toml'
+ '| sed -e "s/#.*$//" | sed -e "s/=/:/"'
+ )
+ stdout = node.run_cmd(readcmd)
+ agent_conf = {"server": yaml.safe_load(stdout)}
+
+ pingcmd = (
+ 'ping -n -c1 ' + agent_conf["server"]["amqp_host"] +
+ '| sed -ne "s/^.*bytes from //p" | sed -e "s/:.*//"'
+ )
+ agent_conf["server"]["amqp_host"] = node.run_cmd(pingcmd)
+
+ return agent_conf
+ return None
+
def is_mcelog_installed(self, compute, package):
"""Check whether package exists on compute node.
@@ -479,6 +576,16 @@ class ConfigServer(object):
return False, warning
return True, warning
+ def trigger_alarm_update(self, alarm, compute_node):
+ # TODO: move these actions to main, with criteria lists so that we can reference that
+ # i.e. test_plugin_with_aodh(self, compute, plugin.., logger, criteria_list, alarm_action)
+ if alarm == 'mcelog':
+ compute_node.run_cmd('sudo modprobe mce-inject')
+ compute_node.run_cmd('sudo ./mce-inject_ea < corrected')
+ if alarm == 'ovs_events':
+ compute_node.run_cmd('sudo ifconfig -a | grep br0')
+ compute_node.run_cmd('sudo ifconfig br0 down; sudo ifconfig br0 up')
+
def test_plugins_with_aodh(
self, compute, plugin_interval, logger,
criteria_list=[]):
@@ -487,11 +594,13 @@ class ConfigServer(object):
timestamps1 = {}
timestamps2 = {}
nodes = get_apex_nodes()
+ compute_node = [node for node in nodes if node.get_dict()['name'] == compute][0]
for node in nodes:
if node.is_controller():
self.__logger.info('Getting AODH Alarm list on {}' .format(
(node.get_dict()['name'])))
node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
+ self.trigger_alarm_update(criteria_list, compute_node)
stdout = node.run_cmd(
"source overcloudrc.v3;"
+ "aodh alarm list | grep {0} | grep {1}"
@@ -510,10 +619,9 @@ class ConfigServer(object):
return False
for line in stdout.splitlines()[3: -1]:
line = line.replace('|', "")
- if line.split()[0] == 'timestamp':
+ if line.split()[0] == 'state_timestamp':
timestamps1 = line.split()[1]
- else:
- pass
+ self.trigger_alarm_update(criteria_list, compute_node)
time.sleep(12)
stdout = node.run_cmd(
"source overcloudrc.v3; aodh alarm show {}" .format(
@@ -523,10 +631,8 @@ class ConfigServer(object):
return False
for line in stdout.splitlines()[3:-1]:
line = line.replace('|', "")
- if line.split()[0] == 'timestamp':
+ if line.split()[0] == 'state_timestamp':
timestamps2 = line.split()[1]
- else:
- pass
if timestamps1 == timestamps2:
self.__logger.info(
"Data not updated after interval of 12 seconds")
@@ -604,7 +710,7 @@ class ConfigServer(object):
self, compute, plugin_interval, logger, plugin, snmp_mib_files=[],
snmp_mib_strings=[], snmp_in_commands=[]):
- if plugin == 'hugepages' or 'intel_rdt' or 'mcelog':
+ if plugin in ('hugepages', 'intel_rdt', 'mcelog'):
nodes = get_apex_nodes()
for node in nodes:
if compute == node.get_dict()['name']:
@@ -651,3 +757,124 @@ class ConfigServer(object):
return True
else:
return False
+
+ def check_dma_dummy_included(self, compute, name):
+ """Check if dummy collectd config by DMA
+ is included in collectd.conf file.
+
+ Keyword arguments:
+ compute -- compute node instance
+ name -- config file name
+ """
+ compute_name = compute.get_name()
+ nodes = get_apex_nodes()
+ for node in nodes:
+ if compute_name == node.get_dict()['name']:
+ dummy_conf = node.run_cmd('ls /etc/collectd/collectd.conf.d')
+ if name + '.conf' not in dummy_conf:
+ self.__logger.error('check conf FAIL')
+ return False
+ else:
+ self.__logger.info('check conf PASS')
+ fullpath = '/etc/collectd/collectd.conf.d/{}'.format(
+ name + '.conf')
+ self.__logger.info('Delete file {}'.format(fullpath))
+ node.run_cmd('sudo rm -f ' + fullpath)
+ return True
+ self.__logger.error('Some panic, compute not found')
+ return False
+
+ def create_testvm(self, compute_node, test_name):
+ nodes = get_apex_nodes()
+ compute_name = compute_node.get_name()
+
+ controller_node = None
+ for node in nodes:
+ if node.is_controller():
+ controller_node = node
+ break
+
+ self.__logger.debug('Creating Test VM on {}' .format(compute_name))
+ self.__logger.debug('Create command is executed in {}' .format(
+ (controller_node.get_dict()['name'])))
+
+ node.put_file(constants.ENV_FILE, 'overcloudrc.v3')
+ node.put_file(TEST_VM_IMAGE_PATH, TEST_VM_IMAGE)
+ image = controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack image create -f value -c id'
+ ' --disk-format qcow2 --file {0} {1}'
+ .format(TEST_VM_IMAGE, test_name))
+ flavor = controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack flavor create -f value -c id {}'
+ .format(test_name))
+ host = controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack hypervisor list -f value -c "Hypervisor Hostname"'
+ ' | grep "^{}\\."'
+ .format(compute_name))
+ server = controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack server create -f value -c id'
+ ' --image {0} --flavor {1} --availability-zone {2} {3}'
+ .format(image, flavor, 'nova:' + host, test_name))
+
+ resources = {"image": image, "flavor": flavor, "server": server}
+
+ if server:
+ self.__logger.debug('VM created')
+ self.__logger.debug('VM info: {}'.format(resources))
+
+ return resources
+
+ def delete_testvm(self, resources):
+ nodes = get_apex_nodes()
+
+ controller_node = None
+ for node in nodes:
+ if node.is_controller():
+ controller_node = node
+ break
+
+ self.__logger.debug('Deleting Test VM')
+ self.__logger.debug('VM to be deleted info: {}'.format(resources))
+ self.__logger.debug('Delete command is executed in {}' .format(
+ (controller_node.get_dict()['name'])))
+
+ server = resources.get('server', None)
+ flavor = resources.get('flavor', None)
+ image = resources.get('image', None)
+ if server:
+ controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack server delete {}'.format(server))
+ if flavor:
+ controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack flavor delete {}'.format(flavor))
+ if image:
+ controller_node.run_cmd(
+ 'source overcloudrc.v3;'
+ 'openstack image delete {}'.format(image))
+
+ self.__logger.debug('VM and other OpenStack resources deleted')
+
+ def test_dma_infofetch_get_data(self, compute, test_name):
+ compute_name = compute.get_name()
+ nodes = get_apex_nodes()
+ for node in nodes:
+ if compute_name == node.get_dict()['name']:
+ stdout = node.run_cmd(
+ 'redis-cli keys "barometer-dma/vm/*/vminfo"'
+ ' | while read k; do redis-cli get $k; done'
+ ' | grep {}'.format(test_name))
+ self.__logger.debug('InfoFetch data: {}'.format(stdout))
+ if stdout and test_name in stdout:
+ self.__logger.info('PASS')
+ return True
+ else:
+ self.__logger.info('No test vm info')
+
+ self.__logger.info('FAIL')
+ return False
diff --git a/baro_tests/dma.py b/baro_tests/dma.py
new file mode 100644
index 00000000..4a44480b
--- /dev/null
+++ b/baro_tests/dma.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2018 OPNFV
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# Patch on October 10 2017
+
+"""Executing test of DMA"""
+
+import os
+import pika
+import requests
+import time
+
+import tests
+
+logger = None
+
+TEMP_DIR = '/root'
+
+
+class DMAClient(object):
+ """Client to request DMA"""
+ def __init__(self, host, port, user, passwd):
+ """
+ Keyword arguments:
+ host -- Host URL
+ port -- Host Port
+ user -- Username
+ passwd -- Password
+ """
+ self._host = host
+ self._port = port
+ self._user = user
+ self._passwd = passwd
+
+ def set(self, file):
+ logger.error('Do nothing to DMA')
+
+ def __str__(self):
+ return ('host: {0}, port: {1}, user: {2}, pass: {3}'
+ .format(self._host, self._port,
+ self._user, (self._passwd and '<Filtered>')))
+
+
+class RestDMAClient(DMAClient):
+ """Client to request DMA using REST"""
+ def __init__(self, host, port, user, passwd):
+ super(self.__class__, self).__init__(host, port, user, passwd)
+
+ def set(self, file):
+ logger.debug('Send to DMA using REST -- {}'.format(str(self)))
+
+ if not os.path.isfile(file):
+ logger.error('{} is not found'.format(file))
+ return False
+ filename = os.path.basename(file)
+
+ url = 'http://{0}:{1}/collectd/conf'.format(self._host, self._port)
+ config = {'file': (filename, open(file, 'r'))}
+ requests.post(url, files=config)
+
+ return True
+
+
+class PubDMAClient(DMAClient):
+ """Client to request DMA using AMQP Publish"""
+ def __init__(self, host, port, user, passwd):
+ super(self.__class__, self).__init__(host, port, user, passwd)
+
+ def set(self, file):
+ logger.debug('Send to DMA using AMQP Publish -- {}'
+ .format(str(self)))
+
+ if not os.path.isfile(file):
+ logger.error('{} is not found'.format(file))
+ return False
+ filename = os.path.basename(file)
+ filebody = open(file, 'r').read()
+ message = filename + '/' + filebody
+
+ credentials = pika.PlainCredentials(self._user, self._passwd)
+ connection = pika.BlockingConnection(pika.ConnectionParameters(
+ host=self._host, port=int(self._port),
+ credentials=credentials))
+ channel = connection.channel()
+ channel.exchange_declare(exchange='collectd-conf',
+ exchange_type='fanout')
+ channel.basic_publish(exchange='collectd-conf',
+ routing_key='',
+ body=message)
+
+ connection.close()
+ return True
+
+
+def _process_dma_result(compute_node, testfunc,
+ result, results_list, node):
+ """Print DMA test result and append it to results list.
+
+ Keyword arguments:
+ testfunc -- DMA function name
+ result -- boolean test result
+ results_list -- results list
+ """
+ if result:
+ logger.info(
+ 'Test case for {0} with DMA PASSED on {1}.'.format(
+ node, testfunc))
+ else:
+ logger.error(
+ 'Test case for {0} with DMA FAILED on {1}.'.format(
+ node, testfunc))
+ results_list.append((compute_node, "DMA", testfunc, result))
+
+
+def _print_result_of_dma(compute_ids, results):
+ """Print results of DMA.
+
+ Keyword arguments:
+ compute_ids -- list of compute node IDs
+ results -- results list
+ """
+ compute_node_names = ['Node-{}'.format(i) for i in range(
+ len((compute_ids)))]
+ all_computes_in_line = ''
+ for compute in compute_node_names:
+ all_computes_in_line += '| ' + compute + (' ' * (7 - len(compute)))
+ line_of_nodes = '| Test ' + all_computes_in_line + '|'
+ logger.info('=' * 70)
+ logger.info('+' + ('-' * ((9 * len(compute_node_names))+16)) + '+')
+ logger.info(
+ '|' + ' ' * ((9*len(compute_node_names))/2)
+ + ' DMA TEST '
+ + ' ' * (
+ 9*len(compute_node_names) - (9*len(compute_node_names))/2)
+ + '|')
+ logger.info(
+ '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+ logger.info(line_of_nodes)
+ logger.info(
+ '+' + ('-' * 16) + '+' + (('-' * 8) + '+') * len(compute_node_names))
+
+ testname = "DMA"
+ print_line = ''
+ for id in compute_ids:
+ all_result = \
+ 'FAIL' if [
+ testfunc for comp_id, testname, testfunc, res in results
+ if comp_id == id and not res] else 'PASS'
+ print_line += '| ' + all_result + ' '
+ logger.info(
+ '| {}'.format(testname) + (' ' * (15 - len(testname)))
+ + print_line + '|')
+
+ for testfunc in ['Server', 'InfoFetch']:
+ print_line = ''
+ for id in compute_ids:
+ if (id, testname, testfunc, True) in results:
+ print_line += ' PASS |'
+ elif (id, testname, testfunc, False) in results:
+ print_line += ' FAIL |'
+ else:
+ print_line += ' SKIP |'
+ logger.info(
+ '| {}'.format(testfunc) + (' ' * (14-len(testfunc)))
+ + '|' + print_line)
+
+ logger.info(
+ '+' + ('-' * 16) + '+'
+ + (('-' * 8) + '+') * len(compute_node_names))
+ logger.info('=' * 70)
+
+
+def dma_main(bt_logger, conf, computes):
+ """Check DMA of each compute node.
+
+ Keyword arguments:
+ bt_logger -- logger instance
+ computes -- compute node list
+ """
+ global logger
+ logger = bt_logger
+
+ compute_ids = []
+ agent_results = []
+ for compute_node in computes:
+ node_id = compute_node.get_id()
+ compute_ids.append(node_id)
+
+ agent_server_running = conf.is_dma_server_running(compute_node)
+ agent_infofetch_running = (
+ conf.is_dma_infofetch_running(compute_node) and
+ conf.is_redis_running(compute_node))
+
+ if agent_server_running:
+ test_name = 'barotest'
+ tmpfile = TEMP_DIR + '/' + test_name + '.conf'
+
+ agent_config = conf.get_dma_config(compute_node)
+ listen_ip = compute_node.get_ip()
+ listen_port = agent_config.get('server').get('listen_port')
+ amqp_host = agent_config.get('server').get('amqp_host')
+ amqp_port = agent_config.get('server').get('amqp_port')
+ amqp_user = agent_config.get('server').get('amqp_user')
+ amqp_passwd = agent_config.get('server').get('amqp_password')
+ rest_client = RestDMAClient(
+ listen_ip, listen_port, '', '')
+ pub_client = PubDMAClient(
+ amqp_host, amqp_port, amqp_user,
+ amqp_passwd)
+
+ all_res = True
+ for client in [rest_client, pub_client]:
+ tests.test_dma_server_set_collectd(
+ compute_node, tmpfile, logger, client)
+ sleep_time = 1
+ logger.info(
+ 'Sleeping for {} seconds'.format(sleep_time)
+ + ' before DMA server test...')
+ time.sleep(sleep_time)
+ res = conf.check_dma_dummy_included(
+ compute_node, test_name)
+ all_res = all_res and res
+
+ _process_dma_result(
+ compute_node.get_id(), 'Server',
+ all_res, agent_results, compute_node.get_name())
+
+ if agent_infofetch_running:
+ test_name = 'barotest'
+ resources = conf.create_testvm(compute_node, test_name)
+ sleep_time = 5
+ logger.info(
+ 'Sleeping for {} seconds'.format(sleep_time)
+ + ' before DMA infofetch test...')
+ time.sleep(sleep_time)
+ res = conf.test_dma_infofetch_get_data(
+ compute_node, test_name)
+ conf.delete_testvm(resources)
+
+ _process_dma_result(
+ compute_node.get_id(), 'InfoFetch',
+ res, agent_results, compute_node.get_name())
+
+ _print_result_of_dma(compute_ids, agent_results)
+
+ for res in agent_results:
+ if not res[3]:
+ logger.error('Some tests have failed or have not been executed')
+ logger.error('DMA test is Fail')
+ return 1
+ else:
+ pass
+ return 0
diff --git a/baro_tests/tests.py b/baro_tests/tests.py
index 02eca90a..5400510f 100644
--- a/baro_tests/tests.py
+++ b/baro_tests/tests.py
@@ -1,16 +1,19 @@
# -*- coding: utf-8 -*-
-
+#
+# Copyright(c) 2017-2019 Intel Corporation and OPNFV. All rights reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+#
"""Function for testing collectd plug-ins with different oup plug-ins"""
@@ -270,3 +273,15 @@ def test_csv_handles_plugin_data(
logger.info('OK')
return True
+
+
+def test_dma_server_set_collectd(compute, file, logger, client):
+ with open(file, mode='w') as f:
+ f.write('# dummy conf\n')
+ res = client.set(file)
+ if res:
+ logger.info('set collectd PASS')
+ else:
+ logger.error('set collectd FAIL')
+
+ return res