50 files changed, 2429 insertions, 314 deletions
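The dashboard changes below boil down to one substitution: every TC002 panel query now selects the InfluxDB field "rtt.ares" instead of "rtt", the repeatIteration timestamps are refreshed, and three hidden placeholder targets are dropped. A minimal spot check of the renamed field against InfluxDB is sketched here; the influx CLI invocation and the database name "yardstick" are assumptions for illustration, not part of this change:

    # Hypothetical spot check: confirm recent TC002 samples carry the
    # "rtt.ares" field that the updated Grafana queries select.
    influx -database 'yardstick' -execute \
      'SELECT "rtt.ares" FROM "opnfv_yardstick_tc002" WHERE time > now() - 14d LIMIT 5'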
diff --git a/dashboard/Yardstick-TC002-1456495853488 b/dashboard/Yardstick-TC002-1456495853488 index a0be68402..ca8ad2f8e 100644 --- a/dashboard/Yardstick-TC002-1456495853488 +++ b/dashboard/Yardstick-TC002-1456495853488 @@ -137,14 +137,14 @@ ], "hide": false, "measurement": "opnfv_yardstick_tc002", - "query": "SELECT \"rtt\" FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"pod_name\", \"task_id\", \"deploy_scenario\"", + "query": "SELECT \"rtt.ares\" FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"pod_name\", \"task_id\", \"deploy_scenario\"", "refId": "A", "resultFormat": "time_series", "select": [ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" } @@ -199,111 +199,6 @@ ] ], "tags": [] - }, - { - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": true, - "refId": "C", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - }, - { - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": true, - "refId": "D", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] - }, - { - "dsType": "influxdb", - "groupBy": [ - { - "params": [ - "$interval" - ], - "type": "time" - }, - { - "params": [ - "null" - ], - "type": "fill" - } - ], - "hide": true, - "refId": "E", - "resultFormat": "time_series", - "select": [ - [ - { - "params": [ - "value" - ], - "type": "field" - }, - { - "params": [], - "type": "mean" - } - ] - ], - "tags": [] } ], "timeFrom": "14d", @@ -366,9 +261,9 @@ "repeat": "POD", "scopedVars": { "POD": { + "selected": true, "text": "ericsson-pod2", - "value": "ericsson\\-pod2", - "selected": true + "value": "ericsson\\-pod2" } }, "scroll": true, @@ -425,7 +320,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -433,7 +328,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -479,9 +374,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "huawei-pod1", - "value": "huawei\\-pod1", - "selected": true + "value": "huawei\\-pod1" } }, "scroll": true, @@ -538,7 +433,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -546,7 +441,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -576,7 +471,7 @@ 
"transform": "table", "transparent": false, "type": "table", - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 2 }, { @@ -594,9 +489,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "huawei-pod2", - "value": "huawei\\-pod2", - "selected": true + "value": "huawei\\-pod2" } }, "scroll": true, @@ -653,7 +548,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -661,7 +556,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -691,7 +586,7 @@ "transform": "table", "transparent": false, "type": "table", - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 2 }, { @@ -709,9 +604,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "intel-pod5", - "value": "intel\\-pod5", - "selected": true + "value": "intel\\-pod5" } }, "scroll": true, @@ -768,7 +663,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -776,7 +671,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -806,7 +701,7 @@ "transform": "table", "transparent": false, "type": "table", - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 2 }, { @@ -824,9 +719,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "intel-pod6", - "value": "intel\\-pod6", - "selected": true + "value": "intel\\-pod6" } }, "scroll": true, @@ -883,7 +778,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -891,7 +786,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -921,7 +816,7 @@ "transform": "table", "transparent": false, "type": "table", - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 2 }, { @@ -939,9 +834,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "lf-pod2", - "value": "lf\\-pod2", - "selected": true + "value": "lf\\-pod2" } }, "scroll": true, @@ -998,7 +893,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ 
/$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -1006,7 +901,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1036,7 +931,7 @@ "transform": "table", "transparent": false, "type": "table", - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 2 }, { @@ -1046,7 +941,7 @@ "error": false, "fontSize": "90%", "height": "", - "id": 18, + "id": 19, "isNew": true, "links": [], "minSpan": 2, @@ -1054,9 +949,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "zte-pod1", - "value": "zte\\-pod1", - "selected": true + "value": "zte\\-pod1" } }, "scroll": true, @@ -1113,7 +1008,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY \"deploy_scenario\"", "rawQuery": false, "refId": "A", "resultFormat": "table", @@ -1121,7 +1016,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1146,12 +1041,12 @@ ] } ], - "timeFrom": "24h", + "timeFrom": "14d", "title": "$POD", "transform": "table", "transparent": false, "type": "table", - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 2 } ], @@ -1212,9 +1107,9 @@ "repeat": "POD", "scopedVars": { "POD": { + "selected": true, "text": "ericsson-pod2", - "value": "ericsson\\-pod2", - "selected": true + "value": "ericsson\\-pod2" } }, "seriesOverrides": [ @@ -1281,7 +1176,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1289,7 +1184,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1378,9 +1273,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "huawei-pod1", - "value": "huawei\\-pod1", - "selected": true + "value": "huawei\\-pod1" } }, "seriesOverrides": [ @@ -1447,7 +1342,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1455,7 +1350,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1494,7 +1389,7 @@ "ms", "short" ], - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 3 }, { @@ -1546,9 +1441,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "huawei-pod2", - "value": "huawei\\-pod2", - "selected": true + "value": 
"huawei\\-pod2" } }, "seriesOverrides": [ @@ -1615,7 +1510,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1623,7 +1518,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1662,7 +1557,7 @@ "ms", "short" ], - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 3 }, { @@ -1714,9 +1609,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "intel-pod5", - "value": "intel\\-pod5", - "selected": true + "value": "intel\\-pod5" } }, "seriesOverrides": [ @@ -1783,7 +1678,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1791,7 +1686,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1830,7 +1725,7 @@ "ms", "short" ], - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 3 }, { @@ -1882,9 +1777,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "intel-pod6", - "value": "intel\\-pod6", - "selected": true + "value": "intel\\-pod6" } }, "seriesOverrides": [ @@ -1951,7 +1846,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -1959,7 +1854,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -1998,7 +1893,7 @@ "ms", "short" ], - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 3 }, { @@ -2050,9 +1945,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "lf-pod2", - "value": "lf\\-pod2", - "selected": true + "value": "lf\\-pod2" } }, "seriesOverrides": [ @@ -2119,7 +2014,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": 
"time_series", @@ -2127,7 +2022,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -2166,7 +2061,7 @@ "ms", "short" ], - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 3 }, { @@ -2189,7 +2084,7 @@ "threshold2Color": "rgba(234, 112, 112, 0.22)" }, "height": "", - "id": 19, + "id": 20, "interval": "", "isNew": true, "leftYAxisLabel": "<RTT>", @@ -2218,9 +2113,9 @@ "repeat": null, "scopedVars": { "POD": { + "selected": true, "text": "zte-pod1", - "value": "zte\\-pod1", - "selected": true + "value": "zte\\-pod1" } }, "seriesOverrides": [ @@ -2287,7 +2182,7 @@ } ], "measurement": "opnfv_yardstick_tc002", - "query": "SELECT mean(\"rtt\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", + "query": "SELECT mean(\"rtt.ares\") FROM \"opnfv_yardstick_tc002\" WHERE \"pod_name\" =~ /$POD$/ AND \"deploy_scenario\" =~ /$SCENARIO$/ AND $timeFilter GROUP BY time(24h), \"deploy_scenario\", \"pod_name\" fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -2295,7 +2190,7 @@ [ { "params": [ - "rtt" + "rtt.ares" ], "type": "field" }, @@ -2334,7 +2229,7 @@ "ms", "short" ], - "repeatIteration": 1467967293010, + "repeatIteration": 1469865492903, "repeatPanelId": 3 } ], @@ -2343,7 +2238,7 @@ } ], "time": { - "from": "now-24h", + "from": "now-7d", "to": "now" }, "timepicker": { @@ -2398,84 +2293,84 @@ "name": "POD", "options": [ { + "selected": false, "text": "All", - "value": "(elxg482ls42|ericsson\\-pod1|ericsson\\-pod2|huawei\\-pod1|huawei\\-pod2|huawei\\-us\\-deploy\\-bare\\-1|intel\\-pod5|intel\\-pod6|lf\\-pod1|lf\\-pod2|opnfv\\-jump\\-1|opnfv\\-jump\\-2|orange\\-fr\\-pod2|unknown|zte\\-pod1)", - "selected": false + "value": "(elxg482ls42|ericsson\\-pod1|ericsson\\-pod2|huawei\\-pod1|huawei\\-pod2|huawei\\-us\\-deploy\\-bare\\-1|intel\\-pod5|intel\\-pod6|lf\\-pod1|lf\\-pod2|opnfv\\-jump\\-1|opnfv\\-jump\\-2|orange\\-fr\\-pod2|unknown|zte\\-pod1)" }, { + "selected": false, "text": "elxg482ls42", - "value": "elxg482ls42", - "selected": false + "value": "elxg482ls42" }, { + "selected": false, "text": "ericsson-pod1", - "value": "ericsson\\-pod1", - "selected": false + "value": "ericsson\\-pod1" }, { + "selected": true, "text": "ericsson-pod2", - "value": "ericsson\\-pod2", - "selected": true + "value": "ericsson\\-pod2" }, { + "selected": true, "text": "huawei-pod1", - "value": "huawei\\-pod1", - "selected": true + "value": "huawei\\-pod1" }, { + "selected": true, "text": "huawei-pod2", - "value": "huawei\\-pod2", - "selected": true + "value": "huawei\\-pod2" }, { + "selected": false, "text": "huawei-us-deploy-bare-1", - "value": "huawei\\-us\\-deploy\\-bare\\-1", - "selected": false + "value": "huawei\\-us\\-deploy\\-bare\\-1" }, { + "selected": true, "text": "intel-pod5", - "value": "intel\\-pod5", - "selected": true + "value": "intel\\-pod5" }, { + "selected": true, "text": "intel-pod6", - "value": "intel\\-pod6", - "selected": true + "value": "intel\\-pod6" }, { + "selected": false, "text": "lf-pod1", - "value": "lf\\-pod1", - "selected": false + "value": "lf\\-pod1" }, { + "selected": true, "text": "lf-pod2", - "value": "lf\\-pod2", - "selected": true + "value": "lf\\-pod2" }, { + "selected": false, "text": "opnfv-jump-1", - "value": "opnfv\\-jump\\-1", - "selected": false + "value": "opnfv\\-jump\\-1" }, { + "selected": false, "text": "opnfv-jump-2", - "value": "opnfv\\-jump\\-2", - 
"selected": false + "value": "opnfv\\-jump\\-2" }, { + "selected": false, "text": "orange-fr-pod2", - "value": "orange\\-fr\\-pod2", - "selected": false + "value": "orange\\-fr\\-pod2" }, { + "selected": false, "text": "unknown", - "value": "unknown", - "selected": false + "value": "unknown" }, { + "selected": true, "text": "zte-pod1", - "value": "zte\\-pod1", - "selected": true + "value": "zte\\-pod1" } ], "query": "SHOW TAG VALUES WITH KEY = \"pod_name\"", @@ -2558,6 +2453,6 @@ }, "refresh": "15m", "schemaVersion": 8, - "version": 2, + "version": 5, "links": [] } diff --git a/docs/userguide/03-list-of-tcs.rst b/docs/userguide/03-list-of-tcs.rst index 4f986cd78..96e5297a1 100644 --- a/docs/userguide/03-list-of-tcs.rst +++ b/docs/userguide/03-list-of-tcs.rst @@ -44,6 +44,7 @@ Generic NFVI Test Case Descriptions opnfv_yardstick_tc044.rst opnfv_yardstick_tc055.rst opnfv_yardstick_tc061.rst + opnfv_yardstick_tc063.rst opnfv_yardstick_tc069.rst opnfv_yardstick_tc070.rst opnfv_yardstick_tc071.rst diff --git a/docs/userguide/opnfv_yardstick_tc004.rst b/docs/userguide/opnfv_yardstick_tc004.rst new file mode 100644 index 000000000..301286126 --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc004.rst @@ -0,0 +1,77 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. + +************************************* +Yardstick Test Case Description TC004 +************************************* + +.. _cachestat: https://github.com/brendangregg/perf-tools/tree/master/fs + ++-----------------------------------------------------------------------------+ +|Cache Utilization | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC004_Cache Utilization | +| | | ++--------------+--------------------------------------------------------------+ +|metric | Cache Utilization | +| | | ++--------------+--------------------------------------------------------------+ +|test purpose | To evaluate the IaaS compute capability with regards to | +| | cache utilization.This test case should be run in parallel | +| | to other Yardstick test cases and not run as a stand-alone | +| | test case. | +| | Measure the cache usage statistics including cache hit, | +| | cache miss, hit ratio, page cache size and page cache size. | +| | Both average and maximun values are obtained. | +| | The purpose is also to be able to spot trends. | +| | Test results, graphs and similar shall be stored for | +| | comparison reasons and product evolution understanding | +| | between different OPNFV versions and/or configurations. | +| | | ++--------------+--------------------------------------------------------------+ +|configuration | File: cachestat.yaml (in the 'samples' directory) | +| | | +| | * interval: 1 - repeat, pausing every 1 seconds in-between. | +| | | ++--------------+--------------------------------------------------------------+ +|test tool | cachestat | +| | | +| | cachestat is not always part of a Linux distribution, hence | +| | it needs to be installed. | +| | | ++--------------+--------------------------------------------------------------+ +|references | cachestat_ | +| | | +| | ETSI-NFV-TST001 | +| | | ++--------------+--------------------------------------------------------------+ +|applicability | Test can be configured with different: | +| | | +| | * interval; | +| | * runner Duration. 
| +| | | +| | There are default values for each above-mentioned option. | +| | Run in background with other test cases. | +| | | ++--------------+--------------------------------------------------------------+ +|pre-test | The test case image needs to be installed into Glance | +|conditions | with cachestat included in the image. | +| | | +| | No POD specific requirements have been identified. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | description and expected result | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | The host is installed as client. The related TC, or TCs, is | +| | invoked and cachestat logs are produced and stored. | +| | | +| | Result: logs are stored. | +| | | ++--------------+--------------------------------------------------------------+ +|test verdict | None. Cache utilization results are fetched and stored. | +| | | ++--------------+--------------------------------------------------------------+ diff --git a/docs/userguide/opnfv_yardstick_tc052.rst b/docs/userguide/opnfv_yardstick_tc052.rst new file mode 100644 index 000000000..9514b6819 --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc052.rst @@ -0,0 +1,141 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Yin Kanglin and others. +.. 14_ykl@tongji.edu.cn + +************************************* +Yardstick Test Case Description TC052 +************************************* + ++-----------------------------------------------------------------------------+ +|OpenStack Controller Node Disk I/O Block High Availability | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC052: OpenStack Controller Node Disk I/O | +| | Block High Availability | ++--------------+--------------------------------------------------------------+ +|test purpose | This test case will verify the high availability of control | +| | node. When the disk I/O of a specified disk is blocked, | +| | which breaks down the Openstack services on this node. Read | +| | and write services should still be accessed by other | +| | controller nodes, and the services on failed controller node | +| | should be isolated. | ++--------------+--------------------------------------------------------------+ +|test method | This test case blocks the disk I/O of a specified control | +| | node, then checks whether the services that need to read or | +| | wirte the disk of the control node are OK with some monitor | +| | tools. | ++--------------+--------------------------------------------------------------+ +|attackers | In this test case, an attacker called "disk-block" is | +| | needed. This attacker includes two parameters: | +| | 1) fault_type: which is used for finding the attacker's | +| | scripts. It should be always set to "disk-block" in this | +| | test case. | +| | 2) host: which is the name of a control node being attacked. | +| | e.g. | +| | -fault_type: "disk-block" | +| | -host: node1 | ++--------------+--------------------------------------------------------------+ +|monitors | In this test case, two kinds of monitor are needed: | +| | 1. the "openstack-cmd" monitor constantly request a specific | +| | Openstack command, which needs two parameters: | +| | 1) monitor_type: which is used for finding the monitor class | +| | and related scripts. 
It should be always set to | +| | "openstack-cmd" for this monitor. | +| | 2) command_name: which is the command name used for request. | +| | | +| | e.g. | +| | -monitor_type: "openstack-cmd" | +| | -command_name: "nova flavor-list" | +| | | +| | 2. the second monitor verifies the read and write function | +| | by a "operation" and a "result checker". | +| | the "operation" have two parameters: | +| | 1) operation_type: which is used for finding the operation | +| | class and related scripts. | +| | 2) action_parameter: parameters for the operation. | +| | the "result checker" have three parameters: | +| | 1) checker_type: which is used for finding the reuslt | +| | checker class and realted scripts. | +| | 2) expectedValue: the expected value for the output of the | +| | checker script. | +| | 3) condition: whether the expected value is in the output of | +| | checker script or is totally same with the output. | +| | | +| | In this case, the "operation" adds a flavor and the "result | +| | checker" checks whether ths flavor is created. Their | +| | parameters show as follows: | +| | operation: | +| | -operation_type: "nova-create-flavor" | +| | -action_parameter: | +| | flavorconfig: "test-001 test-001 100 1 1" | +| | result checker: | +| | -checker_type: "check-flavor" | +| | -expectedValue: "test-001" | +| | -condition: "in" | ++--------------+--------------------------------------------------------------+ +|metrics | In this test case, there is one metric: | +| | 1)service_outage_time: which indicates the maximum outage | +| | time (seconds) of the specified Openstack command request. | ++--------------+--------------------------------------------------------------+ +|test tool | Developed by the project. Please see folder: | +| | "yardstick/benchmark/scenarios/availability/ha_tools" | +| | | ++--------------+--------------------------------------------------------------+ +|references | ETSI NFV REL001 | +| | | ++--------------+--------------------------------------------------------------+ +|configuration | This test case needs two configuration files: | +| | 1) test case file: opnfv_yardstick_tc052.yaml | +| | -Attackers: see above "attackers" discription | +| | -waiting_time: which is the time (seconds) from the process | +| | being killed to stoping monitors the monitors | +| | -Monitors: see above "monitors" discription | +| | -SLA: see above "metrics" discription | +| | | +| | 2)POD file: pod.yaml | +| | The POD configuration should record on pod.yaml first. | +| | the "host" item in this test case will use the node name in | +| | the pod.yaml. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | description and expected result | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | do attacker: connect the host through SSH, and then execute | +| | the block disk I/O script on the host. | +| | | +| | Result: The disk I/O of the host will be blocked | +| | | ++--------------+--------------------------------------------------------------+ +|step 2 | start monitors: | +| | each monitor will run with independently process | +| | | +| | Result: The monitor info will be collected. 
| +| | | ++--------------+--------------------------------------------------------------+ +|step 3 | do operation: add a flavor | +| | | ++--------------+--------------------------------------------------------------+ +|step 4 | do result checker: check whether the falvor is created | +| | | ++--------------+--------------------------------------------------------------+ +|step 5 | stop monitors after a period of time specified by | +| | "waiting_time" | +| | | +| | Result: The monitor info will be aggregated. | +| | | ++--------------+--------------------------------------------------------------+ +|step 6 | verify the SLA | +| | | +| | Result: The test case is passed or not. | +| | | ++--------------+--------------------------------------------------------------+ +|post-action | It is the action when the test cases exist. It excutes the | +| | release disk I/O script to release the blocked I/O. | ++--------------+--------------------------------------------------------------+ +|test verdict | Fails if monnitor SLA is not passed or the result checker is | +| | not passed, or if there is a test case execution problem. | +| | | ++--------------+--------------------------------------------------------------+ diff --git a/docs/userguide/opnfv_yardstick_tc053.rst b/docs/userguide/opnfv_yardstick_tc053.rst new file mode 100644 index 000000000..8808d12d9 --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc053.rst @@ -0,0 +1,142 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Yin Kanglin and others. +.. 14_ykl@tongji.edu.cn + +************************************* +Yardstick Test Case Description TC053 +************************************* + ++-----------------------------------------------------------------------------+ +|OpenStack Controller Load Balance Service High Availability | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC053: OpenStack Controller Load Balance | +| | Service High Availability- | ++--------------+--------------------------------------------------------------+ +|test purpose | This test case will verify the high availability of the | +| | load balance service(current is HAProxy) that supports | +| | OpenStack on controller node. When the load balance service | +| | of a specified controller node is killed, whether other load | +| | balancers on other controller nodes will work, and whether | +| | the controller node will restart the load balancer are | +| | checked. | ++--------------+--------------------------------------------------------------+ +|test method | This test case kills the processes of load balance service | +| | on a selected control node, then checks whether the request | +| | of the related Openstack command is OK and the killed | +| | processes are recovered. | ++--------------+--------------------------------------------------------------+ +|attackers | In this test case, an attacker called "kill-process" is | +| | needed. This attacker includes three parameters: | +| | 1) fault_type: which is used for finding the attacker's | +| | scripts. It should be always set to "kill-process" in this | +| | test case. | +| | 2) process_name: which is the process name of the specified | +| | OpenStack service. If there are multiple processes use the | +| | same name on the host, all of them are killed by this | +| | attacker. | +| | In this case. 
This parameter should always set to "swift- | +| | proxy". | +| | 3) host: which is the name of a control node being attacked. | +| | | +| | e.g. | +| | -fault_type: "kill-process" | +| | -process_name: "haproxy" | +| | -host: node1 | +| | | ++--------------+--------------------------------------------------------------+ +|monitors | In this test case, two kinds of monitor are needed: | +| | 1. the "openstack-cmd" monitor constantly request a specific | +| | Openstack command, which needs two parameters: | +| | 1) monitor_type: which is used for finding the monitor class | +| | and related scritps. It should be always set to | +| | "openstack-cmd" for this monitor. | +| | 2) command_name: which is the command name used for request. | +| | | +| | 2. the "process" monitor check whether a process is running | +| | on a specific node, which needs three parameters: | +| | 1) monitor_type: which used for finding the monitor class | +| | and related scripts. It should be always set to "process" | +| | for this monitor. | +| | 2) process_name: which is the process name for monitor | +| | 3) host: which is the name of the node runing the process | +| | In this case, the command_name of monitor1 should be | +| | services that is supported by load balancer and the process- | +| | name of monitor2 should be "haproxy", for example: | +| | | +| | e.g. | +| | monitor1: | +| | -monitor_type: "openstack-cmd" | +| | -command_name: "nova image-list" | +| | monitor2: | +| | -monitor_type: "process" | +| | -process_name: "haproxy" | +| | -host: node1 | +| | | ++--------------+--------------------------------------------------------------+ +|metrics | In this test case, there are two metrics: | +| | 1)service_outage_time: which indicates the maximum outage | +| | time (seconds) of the specified Openstack command request. | +| | 2)process_recover_time: which indicates the maximun time | +| | (seconds) from the process being killed to recovered | +| | | ++--------------+--------------------------------------------------------------+ +|test tool | Developed by the project. Please see folder: | +| | "yardstick/benchmark/scenarios/availability/ha_tools" | +| | | ++--------------+--------------------------------------------------------------+ +|references | ETSI NFV REL001 | +| | | ++--------------+--------------------------------------------------------------+ +|configuration | This test case needs two configuration files: | +| | 1) test case file: opnfv_yardstick_tc053.yaml | +| | -Attackers: see above "attackers" discription | +| | -waiting_time: which is the time (seconds) from the process | +| | being killed to stoping monitors the monitors | +| | -Monitors: see above "monitors" discription | +| | -SLA: see above "metrics" discription | +| | | +| | 2)POD file: pod.yaml | +| | The POD configuration should record on pod.yaml first. | +| | the "host" item in this test case will use the node name in | +| | the pod.yaml. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | description and expected result | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | start monitors: | +| | each monitor will run with independently process | +| | | +| | Result: The monitor info will be collected. 
| +| | | ++--------------+--------------------------------------------------------------+ +|step 2 | do attacker: connect the host through SSH, and then execute | +| | the kill process script with param value specified by | +| | "process_name" | +| | | +| | Result: Process will be killed. | +| | | ++--------------+--------------------------------------------------------------+ +|step 3 | stop monitors after a period of time specified by | +| | "waiting_time" | +| | | +| | Result: The monitor info will be aggregated. | +| | | ++--------------+--------------------------------------------------------------+ +|step 4 | verify the SLA | +| | | +| | Result: The test case is passed or not. | +| | | ++--------------+--------------------------------------------------------------+ +|post-action | It is the action when the test cases exist. It will check | +| | the status of the specified process on the host, and restart | +| | the process if it is not running for next test cases. | +| | | ++--------------+--------------------------------------------------------------+ +|test verdict | Fails only if SLA is not passed, or if there is a test case | +| | execution problem. | +| | | ++--------------+--------------------------------------------------------------+ diff --git a/docs/userguide/opnfv_yardstick_tc054.rst b/docs/userguide/opnfv_yardstick_tc054.rst new file mode 100644 index 000000000..7f92be2bc --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc054.rst @@ -0,0 +1,125 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Yin Kanglin and others. +.. 14_ykl@tongji.edu.cn + +************************************* +Yardstick Test Case Description TC054 +************************************* + ++-----------------------------------------------------------------------------+ +|OpenStack Virtual IP High Availability | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC054: OpenStack Virtual IP High | +| | Availability | ++--------------+--------------------------------------------------------------+ +|test purpose | This test case will verify the high availability for virtual | +| | ip in the environment. When master node of virtual ip is | +| | abnormally shutdown, connection to virtual ip and | +| | the services binded to the virtual IP it should be OK. | ++--------------+--------------------------------------------------------------+ +|test method | This test case shutdowns the virtual IP master node with | +| | some fault injection tools, then checks whether virtual ips | +| | can be pinged and services binded to virtual ip are OK with | +| | some monitor tools. | ++--------------+--------------------------------------------------------------+ +|attackers | In this test case, an attacker called "control-shutdown" is | +| | needed. This attacker includes two parameters: | +| | 1) fault_type: which is used for finding the attacker's | +| | scripts. It should be always set to "control-shutdown" in | +| | this test case. | +| | 2) host: which is the name of a control node being attacked. 
| +| | | +| | In this case the host should be the virtual ip master node, | +| | that means the host ip is the virtual ip, for exapmle: | +| | -fault_type: "control-shutdown" | +| | -host: node1(the VIP Master node) | ++--------------+--------------------------------------------------------------+ +|monitors | In this test case, two kinds of monitor are needed: | +| | 1. the "ip_status" monitor that pings a specific ip to check | +| | the connectivity of this ip, which needs two parameters: | +| | 1) monitor_type: which is used for finding the monitor class | +| | and related scripts. It should be always set to "ip_status" | +| | for this monitor. | +| | 2) ip_address: The ip to be pinged. In this case, ip_address | +| | should be the virtual IP. | +| | | +| | 2. the "openstack-cmd" monitor constantly request a specific | +| | Openstack command, which needs two parameters: | +| | 1) monitor_type: which is used for finding the monitor class | +| | and related scripts. It should be always set to | +| | "openstack-cmd" for this monitor. | +| | 2) command_name: which is the command name used for request. | +| | | +| | e.g. | +| | monitor1: | +| | -monitor_type: "ip_status" | +| | -host: 192.168.0.2 | +| | monitor2: | +| | -monitor_type: "openstack-cmd" | +| | -command_name: "nova image-list" | +| | | ++--------------+--------------------------------------------------------------+ +|metrics | In this test case, there are two metrics: | +| | 1) ping_outage_time: which-indicates the maximum outage time | +| | to ping the specified host. | +| | 2)service_outage_time: which indicates the maximum outage | +| | time (seconds) of the specified Openstack command request. | ++--------------+--------------------------------------------------------------+ +|test tool | Developed by the project. Please see folder: | +| | "yardstick/benchmark/scenarios/availability/ha_tools" | +| | | ++--------------+--------------------------------------------------------------+ +|references | ETSI NFV REL001 | +| | | ++--------------+--------------------------------------------------------------+ +|configuration | This test case needs two configuration files: | +| | 1) test case file: opnfv_yardstick_tc054.yaml | +| | -Attackers: see above "attackers" discription | +| | -waiting_time: which is the time (seconds) from the process | +| | being killed to stoping monitors the monitors | +| | -Monitors: see above "monitors" discription | +| | -SLA: see above "metrics" discription | +| | | +| | 2)POD file: pod.yaml | +| | The POD configuration should record on pod.yaml first. | +| | the "host" item in this test case will use the node name in | +| | the pod.yaml. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | description and expected result | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | start monitors: | +| | each monitor will run with independently process | +| | | +| | Result: The monitor info will be collected. | +| | | ++--------------+--------------------------------------------------------------+ +|step 2 | do attacker: connect the host through SSH, and then execute | +| | the shutdown script on the VIP master node. | +| | | +| | Result: VIP master node will be shutdown | +| | | ++--------------+--------------------------------------------------------------+ +|step 3 | stop monitors after a period of time specified by | +| | "waiting_time" | +| | | +| | Result: The monitor info will be aggregated. 
| +| | | ++--------------+--------------------------------------------------------------+ +|step 4 | verify the SLA | +| | | +| | Result: The test case is passed or not. | +| | | ++--------------+--------------------------------------------------------------+ +|post-action | It is the action when the test cases exist. It restarts the | +| | original VIP master node if it is not restarted. | +| | | ++--------------+--------------------------------------------------------------+ +|test verdict | Fails only if SLA is not passed, or if there is a test case | +| | execution problem. | +| | | ++--------------+--------------------------------------------------------------+ diff --git a/docs/userguide/opnfv_yardstick_tc061.rst b/docs/userguide/opnfv_yardstick_tc061.rst new file mode 100644 index 000000000..1d424414e --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc061.rst @@ -0,0 +1,88 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. + +************************************* +Yardstick Test Case Description TC061 +************************************* + +.. _man-pages: http://linux.die.net/man/1/sar + ++-----------------------------------------------------------------------------+ +|Network Utilization | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC061_Network Utilization | +| | | ++--------------+--------------------------------------------------------------+ +|metric | Network utilization | +| | | ++--------------+--------------------------------------------------------------+ +|test purpose | To evaluate the IaaS network capability with regards to | +| | network utilization, including Total number of packets | +| | received per second, Total number of packets transmitted per | +| | second, Total number of kilobytes received per second, Total | +| | number of kilobytes transmitted per second, Number of | +| | compressed packets received per second (for cslip etc.), | +| | Number of compressed packets transmitted per second, Number | +| | of multicast packets received per second, Utilization | +| | percentage of the network interface. | +| | This test case should be run in parallel to other Yardstick | +| | test cases and not run as a stand-alone test case. | +| | Measure the network usage statistics from the network devices| +| | Average, minimum and maximun values are obtained. | +| | The purpose is also to be able to spot trends. | +| | Test results, graphs and similar shall be stored for | +| | comparison reasons and product evolution understanding | +| | between different OPNFV versions and/or configurations. | +| | | ++--------------+--------------------------------------------------------------+ +|configuration | File: netutilization.yaml (in the 'samples' directory) | +| | | +| | * interval: 1 - repeat, pausing every 1 seconds in-between. | +| | * count: 1 - display statistics 1 times, then exit. | +| | | ++--------------+--------------------------------------------------------------+ +|test tool | sar | +| | | +| | The sar command writes to standard output the contents of | +| | selected cumulative activity counters in the operating | +| | system. | +| | sar is normally part of a Linux distribution, hence it | +| | doesn't needs to be installed. 
| +| | | ++--------------+--------------------------------------------------------------+ +|references | man-pages_ | +| | | +| | ETSI-NFV-TST001 | +| | | ++--------------+--------------------------------------------------------------+ +|applicability | Test can be configured with different: | +| | | +| | * interval; | +| | * count; | +| | * runner Iteration and intervals. | +| | | +| | There are default values for each above-mentioned option. | +| | Run in background with other test cases. | +| | | ++--------------+--------------------------------------------------------------+ +|pre-test | The test case image needs to be installed into Glance | +|conditions | with sar included in the image. | +| | | +| | No POD specific requirements have been identified. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | description and expected result. | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | The host is installed as client. The related TC, or TCs, is | +| | invoked and sar logs are produced and stored. | +| | | +| | Result: logs are stored. | +| | | ++--------------+--------------------------------------------------------------+ +|test verdict | None. Network utilization results are fetched and stored. | +| | | ++--------------+--------------------------------------------------------------+ diff --git a/docs/userguide/opnfv_yardstick_tc063.rst b/docs/userguide/opnfv_yardstick_tc063.rst new file mode 100644 index 000000000..a77653aa5 --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc063.rst @@ -0,0 +1,81 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. + +************************************* +Yardstick Test Case Description TC063 +************************************* + +.. _iostat: http://linux.die.net/man/1/iostat +.. _fdisk: http://www.tldp.org/HOWTO/Partition/fdisk_partitioning.html + ++-----------------------------------------------------------------------------+ +|Storage Capacity | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC063_Storage Capacity | +| | | ++--------------+--------------------------------------------------------------+ +|metric | Storage/disk size, block size | +| | Disk Utilization | ++--------------+--------------------------------------------------------------+ +|test purpose | This test case will check the parameters which could decide | +| | several models and each model has its specified task to | +| | measure. The test purposes are to measure disk size, block | +| | size and disk utilization. With the test results, we could | +| | evaluate the storage capacity of the host. | +| | | ++--------------+--------------------------------------------------------------+ +|configuration | file: opnfv_yardstick_tc063.yaml | +| | | +| |* test_type: "disk_size" | +| |* runner: | +| | type: Iteration | +| | iterations: 1 - test is run 1 time iteratively. | +| | | ++--------------+--------------------------------------------------------------+ +|test tool | fdisk | +| | A command-line utility that provides disk partitioning | +| | functions | +| | | +| | iostat | +| | This is a computer system monitor tool used to collect and | +| | show operating system storage input and output statistics. 
| ++--------------+--------------------------------------------------------------+ +|references | iostat_ | +| | fdisk_ | +| | | +| | ETSI-NFV-TST001 | +| | | ++--------------+--------------------------------------------------------------+ +|applicability | Test can be configured with different: | +| | | +| | * test_type: "disk size", "block size", "disk utilization" | +| | * interval: 1 - how ofter to stat disk utilization | +| | type: int | +| | unit: seconds | +| | * count: 15 - how many times to stat disk utilization | +| | type: int | +| | unit: na | +| | There are default values for each above-mentioned option. | +| | Run in background with other test cases. | +| | | ++--------------+--------------------------------------------------------------+ +|pre-test | The test case image needs to be installed into Glance | +|conditions | | +| | No POD specific requirements have been identified. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | Output the specific storage capacity of disk information as | +| | the sequence into file. | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | The pod is available and the hosts are installed. Node5 is | +| | used and logs are produced and stored. | +| | | +| | Result: Logs are stored. | +| | | ++--------------+--------------------------------------------------------------+ +|test verdict | None. | ++--------------+--------------------------------------------------------------+ diff --git a/docs/userguide/opnfv_yardstick_tc074.rst b/docs/userguide/opnfv_yardstick_tc074.rst new file mode 100644 index 000000000..c938f5dfd --- /dev/null +++ b/docs/userguide/opnfv_yardstick_tc074.rst @@ -0,0 +1,137 @@ +.. This work is licensed under a Creative Commons Attribution 4.0 International +.. License. +.. http://creativecommons.org/licenses/by/4.0 +.. (c) OPNFV, Huawei Technologies Co.,Ltd and others. + +************************************* +Yardstick Test Case Description TC074 +************************************* + +.. Storperf: https://wiki.opnfv.org/display/storperf/Storperf + ++-----------------------------------------------------------------------------+ +|Storperf | +| | ++--------------+--------------------------------------------------------------+ +|test case id | OPNFV_YARDSTICK_TC074_Storperf | +| | | ++--------------+--------------------------------------------------------------+ +|metric | Storage performance | +| | | ++--------------+--------------------------------------------------------------+ +|test purpose | Storperf integration with yardstick. The purpose of StorPerf | +| | is to provide a tool to measure block and object storage | +| | performance in an NFVI. When complemented with a | +| | characterization of typical VF storage performance | +| | requirements, it can provide pass/fail thresholds for test, | +| | staging, and production NFVI environments. | +| | | +| | The benchmarks developed for block and object storage will | +| | be sufficiently varied to provide a good preview of expected | +| | storage performance behavior for any type of VNF workload. 
| +| | | ++--------------+--------------------------------------------------------------+ +|configuration | file: opnfv_yardstick_tc074.yaml | +| | | +| | * agent_count: 1 - the number of VMs to be created | +| | * agent_image: "Ubuntu-14.04" - image used for creating VMs | +| | * public_network: "ext-net" - name of public network | +| | * volume_size: 2 - cinder volume size | +| | * block_sizes: "4096" - data block size | +| | * queue_depths: "4" | +| | * StorPerf_ip: "192.168.200.2" | +| | * query_interval: 10 - state query interval | +| | * timeout: 600 - maximum allowed job time | +| | | ++--------------+--------------------------------------------------------------+ +|test tool | Storperf | +| | | +| | StorPerf is a tool to measure block and object storage | +| | performance in an NFVI. | +| | | +| | StorPerf is delivered as a Docker container from | +| | https://hub.docker.com/r/opnfv/storperf/tags/. | +| | | ++--------------+--------------------------------------------------------------+ +|references | Storperf_ | +| | | +| | ETSI-NFV-TST001 | +| | | ++--------------+--------------------------------------------------------------+ +|applicability | Test can be configured with different: | +| | | +| | * agent_count | +| | * volume_size | +| | * block_sizes | +| | * queue_depths | +| | * query_interval | +| | * timeout | +| | * target=[device or path] | +| | The path to either an attached storage device | +| | (/dev/vdb, etc) or a directory path (/opt/storperf) that | +| | will be used to execute the performance test. In the case | +| | of a device, the entire device will be used. If not | +| | specified, the current directory will be used. | +| | * workload=[workload module] | +| | If not specified, the default is to run all workloads. The | +| | workload types are: | +| | - rs: 100% Read, sequential data | +| | - ws: 100% Write, sequential data | +| | - rr: 100% Read, random access | +| | - wr: 100% Write, random access | +| | - rw: 70% Read / 30% write, random access | +| | * nossd: Do not perform SSD style preconditioning. | +| | * nowarm: Do not perform a warmup prior to | +| | measurements. | +| | * report= [job_id] | +| | Query the status of the supplied job_id and report on | +| | metrics. If a workload is supplied, will report on only | +| | that subset. | +| | | +| | There are default values for each above-mentioned option. | +| | | ++--------------+--------------------------------------------------------------+ +|pre-test | If you do not have an Ubuntu 14.04 image in Glance, you will | +|conditions | need to add one. A key pair for launching agents is also | +| | required. | +| | | +| | Storperf is required to be installed in the environment. 
| +| | There are two possible methods for Storperf installation: | +| | Run container on Jump Host | +| | Run container in a VM | +| | | +| | Running StorPerf on Jump Host | +| | Requirements: | +| | - Docker must be installed | +| | - Jump Host must have access to the OpenStack Controller | +| | API | +| | - Jump Host must have internet connectivity for | +| | downloading docker image | +| | - Enough floating IPs must be available to match your | +| | agent count | +| | | +| | Running StorPerf in a VM | +| | Requirements: | +| | - VM has docker installed | +| | - VM has OpenStack Controller credentials and can | +| | communicate with the Controller API | +| | - VM has internet connectivity for downloading the | +| | docker image | +| | - Enough floating IPs must be available to match your | +| | agent count | +| | | +| | No POD specific requirements have been identified. | +| | | ++--------------+--------------------------------------------------------------+ +|test sequence | description and expected result | +| | | ++--------------+--------------------------------------------------------------+ +|step 1 | The Storperf is installed and Ubuntu 14.04 image is stored | +| | in glance. TC is invoked and logs are produced and stored. | +| | | +| | Result: Logs are stored. | +| | | ++--------------+--------------------------------------------------------------+ +|test verdict | None. Storage performance results are fetched and stored. | +| | | ++--------------+--------------------------------------------------------------+ diff --git a/plugin/CI/storperf.yaml b/plugin/CI/storperf.yaml new file mode 100644 index 000000000..4407ddf8c --- /dev/null +++ b/plugin/CI/storperf.yaml @@ -0,0 +1,13 @@ +--- +# StorPerf plugin configration file for huawei-pod1 +# Used for integration StorPerf into Yardstick as a plugin + +schema: "yardstick:plugin:0.1" + +plugins: + name: storperf + +deployment: + ip: local + user: root + password: root diff --git a/samples/networkcapacity.yaml b/samples/networkcapacity.yaml new file mode 100644 index 000000000..7c62cc2b5 --- /dev/null +++ b/samples/networkcapacity.yaml @@ -0,0 +1,21 @@ +--- +# Sample benchmark task config file +# Measure compute capacity and scale. +# Including number of cores, number of threads, available memory size and +# cache size. + +schema: "yardstick:task:0.1" + +scenarios: +- + type: NetworkCapacity + host: node1.LF + + runner: + type: Iteration + iterations: 1 + +context: + type: Node + name: LF + file: etc/yardstick/nodes/compass_sclab_virtual/pod.yaml diff --git a/samples/storagecapacity.yaml b/samples/storagecapacity.yaml new file mode 100644 index 000000000..e3b282d14 --- /dev/null +++ b/samples/storagecapacity.yaml @@ -0,0 +1,24 @@ +--- +# Sample benchmark task config file +# Measure storage capacity and scale. +# Including number of PVs, volume of disk size, +# and block size of each device. 
+ +schema: "yardstick:task:0.1" + +scenarios: +- + type: StorageCapacity + options: + test_type: "disk_size" + + host: node5.LF + + runner: + type: Iteration + iterations: 1 + +context: + type: Node + name: LF + file: etc/yardstick/nodes/compass_sclab_virtual/pod.yaml diff --git a/tests/ci/docker/yardstick-ci/Dockerfile b/tests/ci/docker/yardstick-ci/Dockerfile index cc23073d2..684f47122 100644 --- a/tests/ci/docker/yardstick-ci/Dockerfile +++ b/tests/ci/docker/yardstick-ci/Dockerfile @@ -20,6 +20,7 @@ ENV RELENG_REPO_DIR ${REPOS_DIR}/releng RUN apt-get update && apt-get install -y \ wget \ + curl \ git \ sshpass \ qemu-utils \ @@ -48,5 +49,6 @@ RUN cd ${YARDSTICK_REPO_DIR} && pip install -r tests/ci/requirements.txt RUN cd ${YARDSTICK_REPO_DIR} && pip install . ADD http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img /home/opnfv/images/ +ADD http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img /home/opnfv/images/ COPY ./exec_tests.sh /usr/local/bin/ diff --git a/tests/ci/yardstick-verify b/tests/ci/yardstick-verify index c83193574..bdb91003d 100755 --- a/tests/ci/yardstick-verify +++ b/tests/ci/yardstick-verify @@ -42,7 +42,7 @@ EOF DISPATCHER_TYPE=file DISPATCHER_FILE_NAME="/tmp/yardstick.out" -DISPATCHER_HTTP_TARGET= +DISPATCHER_HTTP_TARGET="http://testresults.opnfv.org/test/api/v1/results" DISPATCHER_INFLUXDB_TARGET= while getopts "r:i:h" OPTION; do @@ -80,7 +80,7 @@ cleanup() return fi - for image in $(glance image-list | grep -e cirros-0.3.3 -e yardstick-trusty-server | awk '{print $2}'); do + for image in $(glance image-list | grep -e cirros-0.3.3 -e yardstick-trusty-server -e Ubuntu-14.04 | awk '{print $2}'); do echo "Deleting image $image..." glance image-delete $image || true done @@ -121,6 +121,23 @@ install_yardstick() pip install . } +install_storperf() +{ + # Install Storper on huawei-pod1 + if [ "$NODE_NAME" == "huawei-pod1" ]; then + echo + echo "========== Installing storperf ==========" + + if ! yardstick -d plugin install plugin/CI/storperf.yaml; then + echo "Install storperf plugin FAILED"; + exit 1 + fi + + echo + echo "========== Installed storperf container ==========" + fi +} + build_yardstick_image() { echo @@ -174,6 +191,30 @@ load_cirros_image() echo "Cirros image id: $CIRROS_IMAGE_ID" } +load_ubuntu_image() +{ + echo + echo "========== Loading ubuntu cloud image ==========" + + local ubuntu_image_file=/home/opnfv/images/trusty-server-cloudimg-amd64-disk1.img + + output=$(glance image-create \ + --name Ubuntu-14.04 \ + --disk-format qcow2 \ + --container-format bare \ + --file $ubuntu_image_file) + echo "$output" + + UBUNTU_IMAGE_ID=$(echo "$output" | grep " id " | awk '{print $(NF-1)}') + + if [ -z "$UBUNTU_IMAGE_ID" ]; then + echo 'Failed uploading UBUNTU image to cloud'. 
+ exit 1 + fi + + echo "Ubuntu image id: $UBUNTU_IMAGE_ID" +} + load_yardstick_image() { echo @@ -255,13 +296,28 @@ EOF done + local sceanrio_status="SUCCESS" + if [ $failed -gt 0 ]; then + scenario_status="FAILED" + fi + curl -i -H 'content-type: application/json' -X POST -d \ + "{\"project_name\": \"yardstick\", + \"pod_name\":\"${NODE_NAME}\", + \"installer\":\"${INSTALLER_TYPE}\", + \"description\": \"yardstick ci scenario status\", + \"case_name\": \"scenario_status\", + \"version\":\"${YARDSTICK_BRANCH}\", + \"scenario\":\"${DEPLOY_SCENARIO}\", + \"details\":\"${sceanrio_status}\"}" \ + ${DISPATCHER_HTTP_TARGET} + if [ $failed -gt 0 ]; then echo "---------------------------" echo "$failed out of ${SUITE_FILES[*]} test suites FAILED" echo "---------------------------" exit 1 - fi + fi else @@ -361,8 +417,10 @@ main() build_yardstick_image load_yardstick_image load_cirros_image + load_ubuntu_image create_nova_flavor + install_storperf run_test } diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml deleted file mode 100644 index 2d10e4073..000000000 --- a/tests/opnfv/test_cases/opnfv_yardstick_tc004.yaml +++ /dev/null @@ -1,85 +0,0 @@ ---- -# Yardstick TC004 config file -# Measure cache hit/miss ratio and usage, network throughput and latency. -# Different amounts of flows are tested with, from 2 up to 1001000. -# All tests are run 2 times each. First 2 times with the least -# amount of ports, then 2 times with the next amount of ports, -# and so on until all packet sizes have been run with. -# -# During the measurements cache hit/miss ration, cache usage statistics and -# network latency are recorded/measured using cachestat and ping, respectively. - -schema: "yardstick:task:0.1" - -scenarios: -- - type: CACHEstat - run_in_background: true - - options: - interval: 1 - - host: demeter.yardstick -- - type: CACHEstat - run_in_background: true - - options: - interval: 1 - - host: poseidon.yardstick -- - type: Ping - run_in_background: true - - options: - packetsize: 100 - - host: demeter.yardstick - target: poseidon.yardstick - - sla: - max_rtt: 10 - action: monitor -{% for num_ports in [1, 10, 50, 100, 300, 500, 750, 1000] %} -- - type: Pktgen - options: - packetsize: 64 - number_of_ports: {{num_ports}} - duration: 20 - - host: demeter.yardstick - target: poseidon.yardstick - - runner: - type: Iteration - iterations: 2 - interval: 1 - - sla: - max_ppm: 1000 - action: monitor -{% endfor %} - -context: - name: yardstick - image: yardstick-trusty-server - flavor: yardstick-flavor - user: ubuntu - - placement_groups: - pgrp1: - policy: "availability" - - servers: - demeter: - floating_ip: true - placement: "pgrp1" - poseidon: - floating_ip: true - placement: "pgrp1" - - networks: - test: - cidr: '10.0.1.0/24' diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml index ccdcaebc8..544118869 100644 --- a/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc027.yaml @@ -3,7 +3,7 @@ # Measure IPV6 network latency using ping6 schema: "yardstick:task:0.1" - +{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_physical/pod.yaml" %} scenarios: - type: Ping6 @@ -33,6 +33,6 @@ precondition: context: type: Node name: IPV6 - file: /home/opnfv/repos/yardstick/etc/yardstick/nodes/compass_sclab_physical/pod.yaml + file: {{pod_info}} diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml 
b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml new file mode 100644 index 000000000..714306881 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc052.yaml @@ -0,0 +1,79 @@ +--- +# Test case for TC052 :OpenStack Controller Node Disk I/O Block High Availability +# This test case is written by new scenario-based HA testing framework + +schema: "yardstick:task:0.1" +scenarios: + - + type: "GeneralHA" + options: + attackers: + - + fault_type: "general-attacker" + host: node1 + key: "block-io" + attack_key: "block-io" + + monitors: + - + monitor_type: "openstack-cmd" + key: "nova-flavor-list" + command_name: "nova flavor-list" + monitor_time: 10 + sla: + max_outage_time: 5 + + operations: + - + operation_type: "general-operation" + key: "create-flavor" + operation_key: "nova-create-flavor" + host: node1 + action_parameter: + flavorconfig: "test-001 test-001 100 1 1" + rollback_parameter: + flavorid: "test-001" + + resultCheckers: + - + checker_type: "general-result-checker" + key: "check-flavor" + host: node1 + checker_key: "nova-flavor-checker" + expectedValue: "test-001" + condition: "in" + + steps: + - + actionKey: "block-io" + actionType: "attacker" + index: 1 + + - + actionKey: "nova-flavor-list" + actionType: "monitor" + index: 2 + + - + actionKey: "create-flavor" + actionType: "operation" + index: 3 + + - + actionKey: "check-flavor" + actionType: "resultchecker" + index: 4 + + nodes: + node1: node1.LF + runner: + type: Duration + duration: 1 + sla: + outage_time: 5 + action: monitor + +context: + type: Node + name: LF + file: etc/yardstick/nodes/fuel_virtual/pod.yaml diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml new file mode 100644 index 000000000..696ed3ba4 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc053.yaml @@ -0,0 +1,61 @@ +--- +# Test case for TC053 :Openstack Controller Load Balance Service High Availability +# This test case is written by new scenario-based HA testing framework + +schema: "yardstick:task:0.1" +scenarios: + - + type: "GeneralHA" + options: + attackers: + - + fault_type: "kill-process" + host: node1 + key: "kill-process" + process_name: "haproxy" + + monitors: + - + monitor_type: "process" + key: "service-status" + process_name: "haproxy" + host: node1 + monitor_time: 20 + sla: + max_recover_time: 30 + + - + monitor_type: "openstack-cmd" + key: "list-images" + command_name: "nova image-list" + monitor_time: 10 + sla: + max_outage_time: 5 + + steps: + - + actionKey: "kill-process" + actionType: "attacker" + index: 1 + - + actionKey: "service-status" + actionType: "monitor" + index: 2 + - + actionKey: "list-images" + actionType: "monitor" + index: 3 + + nodes: + node1: node1.LF + runner: + type: Duration + duration: 1 + sla: + outage_time: 5 + action: monitor + +context: + type: Node + name: LF + file: etc/yardstick/nodes/fuel_virtual/pod.yaml diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml new file mode 100644 index 000000000..7d94e3de8 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc054.yaml @@ -0,0 +1,113 @@ +--- +# Test case for TC054 :OpenStack VIP Master Node abnormally shutdown High Availability +# This test case is written by new scenario-based HA testing framework + +schema: "yardstick:task:0.1" +scenarios: + - + type: "GeneralHA" + options: + attackers: + - + fault_type: "bare-metal-down" + host: node1 + key: "bare-metal-down" + + monitors: + - + monitor_type: "openstack-cmd" + 
key: "list-images" + command_name: "nova image-list" + monitor_time: 10 + sla: + max_outage_time: 5 + + - + monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "vip-mgmt-status" + host: node2 + monitor_time: 10 + sla: + max_outage_time: 5 + parameter: + ip_address: "192.168.0.2" + + - + monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "vip-routerp-status" + host: node2 + monitor_time: 10 + sla: + max_outage_time: 5 + parameter: + ip_address: "172.16.0.2" + + - + monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "vip-router-status" + host: node2 + monitor_time: 10 + sla: + max_outage_time: 5 + parameter: + ip_address: "192.168.0.1" + + - + monitor_type: "general-monitor" + monitor_key: "ip-status" + key: "vip-pub" + host: node2 + monitor_time: 10 + sla: + max_outage_time: 5 + parameter: + ip_address: "172.16.0.3" + + + steps: + - + actionKey: "bare-metal-down" + actionType: "attacker" + index: 1 + - + actionKey: "list-images" + actionType: "monitor" + index: 2 + + - + actionKey: "vip-mgmt-status" + actionType: "monitor" + index: 3 + + - + actionKey: "vip-routerp-status" + actionType: "monitor" + index: 4 + + - + actionKey: "vip-router-status" + actionType: "monitor" + index: 5 + + - + actionKey: "vip-pub" + actionType: "monitor" + index: 6 + + nodes: + node1: node1.LF + node2: node2.LF + runner: + type: Duration + duration: 1 + sla: + outage_time: 5 + action: monitor + +context: + type: Node + name: LF + file: etc/yardstick/nodes/fuel_virtual/pod.yaml diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc063.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc063.yaml new file mode 100644 index 000000000..9da889847 --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc063.yaml @@ -0,0 +1,23 @@ +# Yardstick TC063 config file +# Measure disk size, block size and disk utilization using fdisk and iostat + +schema: "yardstick:task:0.1" +{% set host = host or "node5.yardstick-TC063" %} +{% set pod_info = pod_info or "etc/yardstick/nodes/compass_sclab_virtual/pod.yaml" %} + +scenarios: +- + type: StorageCapacity + options: + test_type: "disk_size" + + host: {{host}} + + runner: + type: Iteration + iterations: 1 + +context: + type: Node + name: yardstick-TC063 + file: {{pod_info}} diff --git a/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml new file mode 100644 index 000000000..d506ccc1e --- /dev/null +++ b/tests/opnfv/test_cases/opnfv_yardstick_tc074.yaml @@ -0,0 +1,27 @@ +--- +# Test case for TC074 StorPerf benchmark task config file +# StorPerf is a tool to measure block and object storage performance in an NFVI + +schema: "yardstick:task:0.1" +{% set public_network = public_network or "ext-net" %} +{% set StorPerf_ip = StorPerf_ip or "192.168.200.2" %} +scenarios: +- + type: StorPerf + options: + agent_count: 1 + agent_image: "Ubuntu-14.04" + public_network: {{public_network}} + volume_size: 4 + block_sizes: "4096" + queue_depths: "4" + StorPerf_ip: {{StorPerf_ip}} + query_interval: 10 + timeout: 300 + + runner: + type: Iteration + iterations: 1 + +context: + type: Dummy diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-kvm-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-kvm-ha_daily.yaml index d38788e2e..331cb1074 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-kvm-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-kvm-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": 
"node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-kvm_ovs-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-kvm_ovs-ha_daily.yaml index 13de81d42..8a840a902 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-kvm_ovs-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-kvm_ovs-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-ha_daily.yaml index 8bd95b655..5a158156c 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-noha_daily.yaml index fcd06638c..4a0cb9b01 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-lxd-noha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml index 67d6535b9..f53ef098f 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-ha_daily.yaml @@ -19,6 +19,13 @@ test_cases: - file_name: opnfv_yardstick_tc014.yaml - + file_name: opnfv_yardstick_tc027.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml"}' +- file_name: opnfv_yardstick_tc037.yaml - file_name: opnfv_yardstick_tc043.yaml @@ -49,6 +56,22 @@ test_cases: constraint: installer: fuel - + file_name: opnfv_yardstick_tc050.yaml + constraint: + installer: fuel +- + file_name: opnfv_yardstick_tc051.yaml + constraint: + installer: fuel +- + file_name: opnfv_yardstick_tc052.yaml + constraint: + installer: fuel +- + 
file_name: opnfv_yardstick_tc053.yaml + constraint: + installer: fuel +- file_name: opnfv_yardstick_tc055.yaml constraint: installer: compass @@ -57,6 +80,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml @@ -64,3 +95,11 @@ test_cases: file_name: opnfv_yardstick_tc071.yaml - file_name: opnfv_yardstick_tc072.yaml +- + file_name: opnfv_yardstick_tc074.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"public_network": "ext-net", + "StorPerf_ip": "192.168.200.2"}' diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-noha_daily.yaml index 8d516b177..78cd55844 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-nofeature-noha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-nosdn-ovs-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-nosdn-ovs-ha_daily.yaml index 97e0d66fe..cac5bfc69 100644 --- a/tests/opnfv/test_suites/opnfv_os-nosdn-ovs-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-nosdn-ovs-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-ha_daily.yaml index cf6b86aad..d289cb498 100644 --- a/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-noha_daily.yaml index f8e107927..691c14670 100644 --- a/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-ocl-nofeature-noha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: 
'{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l2-bgpvpn-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l2-bgpvpn-ha_daily.yaml index d261bb884..dc823b3e6 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l2-bgpvpn-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l2-bgpvpn-ha_daily.yaml @@ -28,3 +28,11 @@ test_cases: task_args: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' +- + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-ha_daily.yaml index 8270006b2..f1845ca14 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-noha_daily.yaml index 68f46e03f..e7e168166 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l2-nofeature-noha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-ha_daily.yaml index 29009b64d..dbdd63d6b 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-noha_daily.yaml index 
d60dd0557..c5752981e 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l2-sfc-noha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-odl_l3-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-odl_l3-nofeature-ha_daily.yaml index 56fdf4b0a..c5dcf95a0 100644 --- a/tests/opnfv/test_suites/opnfv_os-odl_l3-nofeature-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-odl_l3-nofeature-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-onos-nofeature-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-onos-nofeature-ha_daily.yaml index 1aa7db9a0..ba907616e 100644 --- a/tests/opnfv/test_suites/opnfv_os-onos-nofeature-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-onos-nofeature-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-onos-nofeature-noha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-onos-nofeature-noha_daily.yaml index d08b10a38..0a3bc14d9 100644 --- a/tests/opnfv/test_suites/opnfv_os-onos-nofeature-noha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-onos-nofeature-noha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/opnfv/test_suites/opnfv_os-onos-sfc-ha_daily.yaml b/tests/opnfv/test_suites/opnfv_os-onos-sfc-ha_daily.yaml index 639a127bf..8933846bc 100644 --- a/tests/opnfv/test_suites/opnfv_os-onos-sfc-ha_daily.yaml +++ b/tests/opnfv/test_suites/opnfv_os-onos-sfc-ha_daily.yaml @@ -29,6 +29,14 @@ test_cases: huawei-pod1: '{"pod_info": "etc/yardstick/nodes/compass_sclab_physical/pod.yaml", "host": "node5.yardstick-TC055"}' - + file_name: opnfv_yardstick_tc063.yaml + constraint: + installer: compass + pod: huawei-pod1 + task_args: + huawei-pod1: '{"pod_info": 
"etc/yardstick/nodes/compass_sclab_physical/pod.yaml", + "host": "node5.yardstick-TC063"}' +- file_name: opnfv_yardstick_tc069.yaml - file_name: opnfv_yardstick_tc070.yaml diff --git a/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py b/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py new file mode 100644 index 000000000..e3a096446 --- /dev/null +++ b/tests/unit/benchmark/scenarios/networking/test_networkcapacity.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python
+
+##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Unittest for yardstick.benchmark.scenarios.networking.networkcapacity.NetworkCapacity
+
+import mock
+import unittest
+import os
+import json
+
+from yardstick.benchmark.scenarios.networking import networkcapacity
+
+SAMPLE_OUTPUT = '{"Number of connections":"308","Number of frames received": "166503"}'
+
+@mock.patch('yardstick.benchmark.scenarios.networking.networkcapacity.ssh')
+class NetworkCapacityTestCase(unittest.TestCase):
+
+    def setUp(self):
+        self.ctx = {
+            'host': {
+                'ip': '172.16.0.137',
+                'user': 'cirros',
+                'password': "root"
+            },
+        }
+
+        self.result = {}
+
+    def test_capacity_successful_setup(self, mock_ssh):
+        c = networkcapacity.NetworkCapacity({}, self.ctx)
+        mock_ssh.SSH().execute.return_value = (0, '', '')
+        c.setup()
+        self.assertIsNotNone(c.client)
+        self.assertTrue(c.setup_done)
+
+    def test_capacity_successful(self, mock_ssh):
+        c = networkcapacity.NetworkCapacity({}, self.ctx)
+
+        mock_ssh.SSH().execute.return_value = (0, SAMPLE_OUTPUT, '')
+        c.run(self.result)
+        expected_result = json.loads(SAMPLE_OUTPUT)
+        self.assertEqual(self.result, expected_result)
+
+    def test_capacity_unsuccessful_script_error(self, mock_ssh):
+        c = networkcapacity.NetworkCapacity({}, self.ctx)
+
+        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
+        self.assertRaises(RuntimeError, c.run, self.result)
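The class-level @mock.patch decorator above is what supplies the mock_ssh argument to every test method: patching once at class scope applies to each test_* method in turn. A minimal, self-contained sketch of that pattern, assuming the standalone mock library already used by these tests; the patch target os.path.exists and the example test class are hypothetical and only illustrate the mechanism:

    import mock
    import unittest


    @mock.patch('os.path.exists')   # class-level patch applies to every test_* method
    class PatchPatternExample(unittest.TestCase):

        def test_receives_mock(self, mock_exists):
            # the patched object is injected as an extra positional argument
            mock_exists.return_value = True
            self.assertTrue(mock_exists('/any/path'))


    if __name__ == '__main__':
        unittest.main()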
diff --git a/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py b/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py new file mode 100644 index 000000000..cad5ba1d1 --- /dev/null +++ b/tests/unit/benchmark/scenarios/storage/test_storagecapacity.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +############################################################################## +# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Unittest for yardstick.benchmark.scenarios.storage.storagecapacity.StorageCapacity + +import mock +import unittest +import os +import json + +from yardstick.benchmark.scenarios.storage import storagecapacity + +DISK_SIZE_SAMPLE_OUTPUT = '{"Numberf of devides": "2", "Total disk size": "1024000000 bytes"}' +BLOCK_SIZE_SAMPLE_OUTPUT = '{"/dev/sda": 1024, "/dev/sdb": 4096}' +DISK_UTIL_RAW_OUTPUT = "vda 10.00\nvda 0.00" +DISK_UTIL_SAMPLE_OUTPUT = '{"vda": {"avg_util": 5.0, "max_util": 10.0, "min_util": 0.0}}' + +@mock.patch('yardstick.benchmark.scenarios.storage.storagecapacity.ssh') +class StorageCapacityTestCase(unittest.TestCase): + + def setUp(self): + self.scn = { + "options": { + 'test_type': 'disk_size' + } + } + self.ctx = { + "host": { + 'ip': '172.16.0.137', + 'user': 'cirros', + 'password': "root" + } + } + self.result = {} + + def test_capacity_successful_setup(self, mock_ssh): + c = storagecapacity.StorageCapacity(self.scn, self.ctx) + + mock_ssh.SSH().execute.return_value = (0, '', '') + c.setup() + self.assertIsNotNone(c.client) + self.assertTrue(c.setup_done) + + def test_capacity_disk_size_successful(self, mock_ssh): + c = storagecapacity.StorageCapacity(self.scn, self.ctx) + + mock_ssh.SSH().execute.return_value = (0, DISK_SIZE_SAMPLE_OUTPUT, '') + c.run(self.result) + expected_result = json.loads(DISK_SIZE_SAMPLE_OUTPUT) + self.assertEqual(self.result, expected_result) + + def test_capacity_block_size_successful(self, mock_ssh): + args = { + "options": { + 'test_type': 'block_size' + } + } + c = storagecapacity.StorageCapacity(args, self.ctx) + + mock_ssh.SSH().execute.return_value = (0, BLOCK_SIZE_SAMPLE_OUTPUT, '') + c.run(self.result) + expected_result = json.loads(BLOCK_SIZE_SAMPLE_OUTPUT) + self.assertEqual(self.result, expected_result) + + def test_capacity_disk_utilization_successful(self, mock_ssh): + args = { + "options": { + 'test_type': 'disk_utilization', + 'interval': 1, + 'count': 2 + } + } + c = storagecapacity.StorageCapacity(args, self.ctx) + + mock_ssh.SSH().execute.return_value = (0, DISK_UTIL_RAW_OUTPUT, '') + c.run(self.result) + expected_result = json.loads(DISK_UTIL_SAMPLE_OUTPUT) + self.assertEqual(self.result, expected_result) + + def test_capacity_unsuccessful_script_error(self, mock_ssh): + c = storagecapacity.StorageCapacity(self.scn, self.ctx) + + mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR') + self.assertRaises(RuntimeError, c.run, self.result) + +def main(): + unittest.main() + +if __name__ == '__main__': + main() diff --git a/tools/dpdk_install.yml b/tools/dpdk_install.yml new file mode 100644 index 000000000..12c83e144 --- /dev/null +++ b/tools/dpdk_install.yml @@ -0,0 +1,124 @@ +heat_template_version: 2015-04-30 + +description: > + Used to run VMs with DPDK 
pktgen + +parameters: + image: + type: string + description: Name of the image + default: yardstick-wily-server + + timeout: + type: number + description: Timeout in seconds for WaitCondition, depends on your image and environment + default: 900 + + external_net_name: + type: string + description: Name of the external network which management network will connect to + default: admin_floating_net + +resources: + flavor: + type: OS::Nova::Flavor + properties: + ram: 4096 + vcpus: 4 + disk: 4 + + network: + type: OS::Neutron::Net + properties: + name: dpdk_net + + subnet: + type: OS::Neutron::Subnet + properties: + name: dpdk_subnet + ip_version: 4 + cidr: 192.168.0.0/24 + network: { get_resource: network } + + management_router: + type: OS::Neutron::Router + properties: + name: management_router + external_gateway_info: + network: { get_param: external_net_name } + + management_router_interface: + type: OS::Neutron::RouterInterface + properties: + router: { get_resource: management_router } + subnet: { get_resource: subnet } + + floating_ip: + type: OS::Neutron::FloatingIP + properties: + floating_network: { get_param: external_net_name } + + floating_ip_association: + type: OS::Nova::FloatingIPAssociation + properties: + floating_ip: { get_resource: floating_ip } + server_id: {get_resource: dpdk_vm} + + keypair: + type: OS::Nova::KeyPair + properties: + name: yardstick-key + public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0RkXfW6pksd1cZmXuvXZF/Mlqqq3ahIGcGoULOC97XMpu0vdxMpcUwdjwGqMwEXTVyfHidu0l99bLqOCpSUKCmbWx3ONJ+1kqFx4HwsKEWLiyDYqsuMrDeZT1eFjC5avCoTcrIw2wq5NaBb00lDGagNZOeopaL5YIa4+PizEY23+cir24D67NU21Fg3JE92AIeGlNa4j66L3a+lL0hZq74Dilmp42wm4GsbplRO6KJfyaraowHb1X+TmhCjBgHk6M/OJ9yPAroZyJNcwjMAuuxhAYWRuT3SdbnoUR0RG2VhfDh0qNid7vOqLbhKPeaLLFmzkN+9w3WdCp6LbSYt87 yardstick@yardstick.opnfv.org + + wait_handle: + type: OS::Heat::WaitConditionHandle + + wait_condition: + type: OS::Heat::WaitCondition + properties: + handle: { get_resource: wait_handle } + count: 1 + timeout: { get_param: timeout } + + dpdk_vm: + type: OS::Nova::Server + depends_on: [subnet, keypair, flavor] + properties: + name: { get_param: "OS::stack_name" } + image: { get_param: image } + flavor: { get_resource: flavor } + key_name: {get_resource: keypair} + networks: + - network: { get_resource: network } + config_drive: True + user_data_format : RAW + user_data: + str_replace: + template: | + #!/bin/sh + cat <<'CEOF' > /tmp/dpdk_post_build.sh + export RTE_SDK=/dpdk + export RTE_TARGET=x86_64-native-linuxapp-gcc + cd /dpdk + make install T=x86_64-native-linuxapp-gcc DESTDIR=destdir + modprobe uio + insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/igb_uio.ko + insmod /dpdk/x86_64-native-linuxapp-gcc/kmod/rte_kni.ko + cd /pktgen-dpdk + make RTE_SDK=/dpdk + echo "PCKTGEN BUILT" + rm -rf /var/lib/cloud/instances + echo "rm succesfull" + ls /dpdk/x86_64-native-linuxapp-gcc/kmod/ + $NOTIFY --data-binary '{"status": "SUCCESS"}' + CEOF + chmod +x /tmp/dpdk_post_build.sh + echo "chmod" + nohup /tmp/dpdk_post_build.sh & + params: + $NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] } + +outputs: + vm_uuid: + description: uuid of the VM + value: { get_attr: [ dpdk_vm, show,id ] } diff --git a/tools/ubuntu-server-cloudimg-dpdk-modify.sh b/tools/ubuntu-server-cloudimg-dpdk-modify.sh new file mode 100755 index 000000000..aa4e252ea --- /dev/null +++ b/tools/ubuntu-server-cloudimg-dpdk-modify.sh @@ -0,0 +1,98 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2015 Ericsson AB and 
others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# installs required packages +# must be run from inside the image (either chrooted or running) + +set -ex + +if [ $# -eq 1 ]; then + nameserver_ip=$1 + + # /etc/resolv.conf is a symbolic link to /run, restore at end + rm /etc/resolv.conf + echo "nameserver $nameserver_ip" > /etc/resolv.conf + echo "nameserver 8.8.8.8" >> /etc/resolv.conf + echo "nameserver 8.8.4.4" >> /etc/resolv.conf +fi + +# iperf3 only available for wily in backports +grep wily /etc/apt/sources.list && \ + echo "deb http://archive.ubuntu.com/ubuntu/ wily-backports main restricted universe multiverse" >> /etc/apt/sources.list + +# Workaround for building on CentOS (apt-get is not working with http sources) +# sed -i 's/http/ftp/' /etc/apt/sources.list + +# Force apt to use ipv4 due to build problems on LF POD. +echo 'Acquire::ForceIPv4 "true";' > /etc/apt/apt.conf.d/99force-ipv4 + +echo 'GRUB_CMDLINE_LINUX="resume=/dev/sda1 default_hugepagesz=1G hugepagesz=1G hugepages=2 iommu=on iommu=pt intel_iommu=on"' >> /etc/default/grub +echo 'vm.nr_hugepages=1024' >> /etc/sysctl.conf +echo 'huge /mnt/huge hugetlbfs defaults 0 0' >> vi /etc/fstab + +mkdir /mnt/huge +chmod 777 /mnt/huge + +for i in {1..2} +do + touch /etc/network/interfaces.d/eth$i.cfg + chmod 777 /etc/network/interfaces.d/eth$i.cfg + echo "auto eth$i" >> /etc/network/interfaces.d/eth$i.cfg + echo "iface eth$i inet dhcp" >> /etc/network/interfaces.d/eth$i.cfg +done + +# this needs for checking dpdk status, adding interfaces to dpdk, bind, unbind etc.. + +# Add hostname to /etc/hosts. +# Allow console access via pwd +cat <<EOF >/etc/cloud/cloud.cfg.d/10_etc_hosts.cfg +manage_etc_hosts: True +password: RANDOM +chpasswd: { expire: False } +ssh_pwauth: True +EOF + +linuxheadersversion=`echo ls boot/vmlinuz* | cut -d- -f2-` + +apt-get update +apt-get install -y \ + fio \ + gcc \ + git \ + iperf3 \ + linux-tools-common \ + linux-tools-generic \ + lmbench \ + make \ + netperf \ + patch \ + perl \ + rt-tests \ + stress \ + sysstat \ + linux-headers-$linuxheadersversion \ + libpcap-dev \ + lua5.2 + +git clone http://dpdk.org/git/dpdk +git clone http://dpdk.org/git/apps/pktgen-dpdk + +git clone https://github.com/kdlucas/byte-unixbench.git /opt/tempT +make --directory /opt/tempT/UnixBench/ + +git clone https://github.com/beefyamoeba5/ramspeed.git /opt/tempT/RAMspeed +cd /opt/tempT/RAMspeed/ramspeed-2.6.0 +mkdir temp +bash build.sh + +git clone https://github.com/beefyamoeba5/cachestat.git /opt/tempT/Cachestat + +# restore symlink +ln -sf /run/resolvconf/resolv.conf /etc/resolv.conf diff --git a/tools/yardstick-img-dpdk-finalize.sh b/tools/yardstick-img-dpdk-finalize.sh new file mode 100644 index 000000000..7a450e269 --- /dev/null +++ b/tools/yardstick-img-dpdk-finalize.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# installs dpdk and pktgen packages on modified image + +# PREREQUISITES +# modified image (yardstick-wily-server) must be uploaded to OpenStack +# heat must be installed: apt-get install python-heatclient, python-glanceclient, python-nova +# must have a public yardstick-key uploaded in openstack +# must have a proper flavor for the image (i.e. 
m1.small) + + +stackname="yardstick-modify-stack" +template=dpdk_install.yml +new_image_name="yardstick-image-pktgen-ready" + +openstack stack create $stackname -f yaml -t $template +progress="WARMING_UP" + +while [ "$progress" != "CREATE_COMPLETE" ] +do + sleep 10 + echo "check stack status......." + show_output=$(openstack stack show $stackname) + progress=$(echo $show_output | sed 's/^.*stack_status . \([^ ]*\).*$/\1/') + echo "$progress" + if [ "$progress" == "CREATE_FAILED" ];then + echo "create $stackname failed" + exit 1 + fi +done + +# workaround: Without wait time, the file size of pktgen is zero in the snapshot. +sleep 60 + +status=$(nova image-create --poll $stackname $new_image_name) +if [[ "$status" =~ "Finished" ]];then + echo "$new_image_name finished" +fi + +nova delete $stackname +sleep 10 +openstack stack delete --yes $stackname diff --git a/tools/yardstick-img-dpdk-modify b/tools/yardstick-img-dpdk-modify new file mode 100644 index 000000000..ec2672d27 --- /dev/null +++ b/tools/yardstick-img-dpdk-modify @@ -0,0 +1,162 @@ +#!/bin/bash + +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# yardstick-img-dpdk-modify - download and modify a Ubuntu cloud image +# +# The actual customization is done by a script passed with an absolute path as +# the only single argument. The command needs to be invoked as sudo +# +# Example invocation: +# yardstick-img-dpdk-modify /home/yardstick/tools/ubuntu-server-cloudimg-dpdk-modify.sh +# +# Warning: the script will create files by default in: +# /tmp/workspace/yardstick +# the files will be owned by root! +# +# TODO: image resize is needed if the base image is too small +# + +set -e +set -x + +die() { + echo "error: $1" >&2 + exit 1 +} + +test $# -eq 1 || die "no image specific script as argument" +test $(id -u) -eq 0 || die "should invoke using sudo" + +cmd=$1 +test -x $cmd +mountdir="/mnt/yardstick" + +workspace=${WORKSPACE:-"/tmp/workspace/yardstick"} +host=${HOST:-"cloud-images.ubuntu.com"} +release=${RELEASE:-"wily"} +image_path="${release}/current/${release}-server-cloudimg-amd64-disk1.img" +image_url=${IMAGE_URL:-"https://${host}/${image_path}"} +md5sums_path="${release}/current/MD5SUMS" +md5sums_url=${MD5SUMS_URL:-"https://${host}/${md5sums_path}"} + +imgfile="${workspace}/yardstick-${release}-server" +raw_imgfile="${workspace}/yardstick-${release}-server.raw" +filename=$(basename $image_url) + +# download and checksum base image, conditionally if local copy is outdated +download() { + test -d $workspace || mkdir -p $workspace + cd $workspace + rm -f MD5SUMS # always download the checksum file to a detect stale image + wget $md5sums_url + test -e $filename || wget -nc $image_url + grep $filename MD5SUMS | md5sum -c || + if [ $? 
-ne 0 ]; then + rm $filename + wget -nc $image_url + grep $filename MD5SUMS | md5sum -c + fi + qemu-img convert $filename $raw_imgfile + cd - +} + +# mount image +setup() { + mkdir -p $mountdir + + for i in $(seq 0 9); do + [ -a /dev/loop$i ] || mknod -m 660 /dev/loop$i b 7 $i + done + + loopdevice=$(kpartx -l $raw_imgfile | head -1 | cut -f1 -d ' ') + + kpartx -a $raw_imgfile + + mount /dev/mapper/$loopdevice $mountdir + mount -t proc none $mountdir/proc + + echo $loopdevice + + sudo resize2fs /dev/mapper/$loopdevice + + cp $cmd $mountdir/$(basename $cmd) +} + +# modify image running a script using in a chrooted environment +modify() { + # resolv.conf does not exist in base image, pass nameserver value from host + nameserver_ip=$(grep -m 1 '^nameserver' \ + /etc/resolv.conf | awk '{ print $2 '}) + + # prevent init scripts from running during install + echo $'#!/bin/sh\nexit 101' >$mountdir/usr/sbin/policy-rc.d + chmod a+x $mountdir/usr/sbin/policy-rc.d + + chroot $mountdir /$(basename $cmd) $nameserver_ip + + rm -rf $mountdir/usr/sbin/policy-rc.d + + umount -f $mountdir/proc + umount $mountdir + + qemu-img convert -c -o compat=0.10 -O qcow2 $raw_imgfile $imgfile +# qemu-img convert -O vmdk $raw_imgfile $imgfile + + if dmsetup table | grep $loopdevice; then + dmsetup clear $loopdevice || true + fi +} + +# cleanup (umount) the image +cleanup() { + # designed to be idempotent + mount | grep $mountdir/proc && umount $mountdir/proc + mount | grep $mountdir && umount $mountdir + if [ -f $raw_imgfile ]; then + kpartx -dv $raw_imgfile || true + fi + rm -f $raw_imgfile + rm -rf $mountdir +} + +exitcode="" +error_trap() +{ + local rc=$? + + set +e + + if [ -z "$exitcode" ]; then + exitcode=$rc + fi + + cleanup + + echo "Image build failed with $exitcode" + + exit $exitcode +} + +main() { + cleanup + + trap "error_trap" EXIT SIGTERM + + download + setup + modify + trap - EXIT SIGTERM + cleanup + + echo "the modified image is found here: $imgfile" +} + +main diff --git a/yardstick/benchmark/scenarios/networking/networkcapacity.bash b/yardstick/benchmark/scenarios/networking/networkcapacity.bash new file mode 100644 index 000000000..a18f97e0b --- /dev/null +++ b/yardstick/benchmark/scenarios/networking/networkcapacity.bash @@ -0,0 +1,41 @@ +#!/bin/bash + +############################################################################## +# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Measure compute capacity and scale of a host + +set -e +OUTPUT_FILE=/tmp/netperf-out.log + +# run capacity test +run_capacity() +{ + netstat -s > $OUTPUT_FILE +} + +# write the result to stdout in json format +output_json() +{ + CONNECTIONS=$(awk '/active/{print $1}' $OUTPUT_FILE) + FRAMES=$(awk '/total\ packets\ received/{print $1}' $OUTPUT_FILE) + echo -e "{ \ + \"Number of connections\":\"$CONNECTIONS\", \ + \"Number of frames received\": \"$FRAMES\" \ + }" +} + +main() +{ + run_capacity + + output_json +} + +main diff --git a/yardstick/benchmark/scenarios/networking/networkcapacity.py b/yardstick/benchmark/scenarios/networking/networkcapacity.py new file mode 100644 index 000000000..57d3b5072 --- /dev/null +++ b/yardstick/benchmark/scenarios/networking/networkcapacity.py @@ -0,0 +1,69 @@ +##############################################################################
+# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+import pkg_resources
+import logging
+import json
+
+import yardstick.ssh as ssh
+from yardstick.benchmark.scenarios import base
+
+LOG = logging.getLogger(__name__)
+
+
+class NetworkCapacity(base.Scenario):
+ """Measure Network capacity and scale.
+
+ This scenario reads network status including number of connections,
+ number of frames sent/received.
+ """
+ __scenario_type__ = "NetworkCapacity"
+ TARGET_SCRIPT = "networkcapacity.bash"
+
+ def __init__(self, scenario_cfg, context_cfg):
+ self.scenario_cfg = scenario_cfg
+ self.context_cfg = context_cfg
+ self.setup_done = False
+
+ def setup(self):
+ """scenario setup"""
+ self.target_script = pkg_resources.resource_filename(
+ "yardstick.benchmark.scenarios.networking",
+ NetworkCapacity.TARGET_SCRIPT)
+
+ host = self.context_cfg['host']
+ if host is None:
+            raise RuntimeError('No host node found, please check the configuration')
+        host_user = host.get('user', 'ubuntu')
+        host_ip = host.get('ip', None)
+        host_pwd = host.get('password', None)
+
+        LOG.debug("user:%s, host:%s", host_user, host_ip)
+        self.client = ssh.SSH(host_user, host_ip, password=host_pwd)
+        self.client.wait(timeout=600)
+
+        # copy script to host
+        self.client.run("cat > ~/networkcapacity.sh",
+                        stdin=open(self.target_script, 'rb'))
+
+        self.setup_done = True
+
+    def run(self, result):
+        """execute the benchmark"""
+
+        if not self.setup_done:
+            self.setup()
+
+        cmd = "sudo bash networkcapacity.sh"
+
+        LOG.debug("Executing command: %s", cmd)
+        status, stdout, stderr = self.client.execute(cmd)
+        if status:
+            raise RuntimeError(stderr)
+
+        result.update(json.loads(stdout))
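The run() method above simply merges the JSON document printed by networkcapacity.bash into the result dict that the Yardstick runner passes in. A minimal sketch of that hand-off, reusing the same output shape as SAMPLE_OUTPUT in the unit test added earlier in this change (the literal values are illustrative, not measured):

    import json

    # stdout shape produced by networkcapacity.bash; the values are made up
    sample_stdout = '{"Number of connections":"308","Number of frames received": "166503"}'

    result = {}                     # dict supplied by the Yardstick runner
    result.update(json.loads(sample_stdout))

    assert result["Number of connections"] == "308"
    assert result["Number of frames received"] == "166503"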
diff --git a/yardstick/benchmark/scenarios/storage/storagecapacity.bash b/yardstick/benchmark/scenarios/storage/storagecapacity.bash new file mode 100644 index 000000000..6ed4b2811 --- /dev/null +++ b/yardstick/benchmark/scenarios/storage/storagecapacity.bash @@ -0,0 +1,69 @@ +#!/bin/bash + +############################################################################## +# Copyright (c) 2016 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Measure storage capacity and scale of a host + +set -e +OUTPUT_FILE=/tmp/storagecapacity-out.log + +# run disk_size test +run_disk_size() +{ + fdisk -l | grep '^Disk.*bytes$' | awk -F [:,\ ] '{print $2,$7}' > $OUTPUT_FILE +} + +# write the disk size to stdout in json format +output_disk_size() +{ + DEVICENUM=`awk 'END{print NR}' $OUTPUT_FILE` + DISKSIZE=`awk 'BEGIN{cnt=0;} {cnt=cnt+$2} END{print cnt}' $OUTPUT_FILE` + echo -e "{\ + \"Number of devices\":\"$DEVICENUM\", \ + \"Total disk size\":\"$DISKSIZE bytes\" \ + }" +} + +# run block_size test +run_block_size() +{ + echo -n "" > $OUTPUT_FILE + blkdevices=`fdisk -l | grep '^Disk.*bytes$' | awk -F [:,\ ] '{print $2}'` + blkdevices=($blkdevices) + for bd in "${blkdevices[@]}";do + blk_size=`blockdev --getbsz $bd` + echo '"'$bd'" '$blk_size >> $OUTPUT_FILE + done +} + +# write the block size to stdout in json format +output_block_size() +{ + BLK_SIZE_STR=`awk 'BEGIN{r="{";} {r=r""$1":"$2","} END{print r}' $OUTPUT_FILE` + BLK_SIZE_STR=${BLK_SIZE_STR%,}"}" + echo $BLK_SIZE_STR +} + +main() +{ + test_type=$1 + case $test_type in + "disk_size" ) + run_disk_size + output_disk_size + ;; + "block_size" ) + run_block_size + output_block_size + ;; + esac +} + +main $1 diff --git a/yardstick/benchmark/scenarios/storage/storagecapacity.py b/yardstick/benchmark/scenarios/storage/storagecapacity.py new file mode 100644 index 000000000..49e3a0339 --- /dev/null +++ b/yardstick/benchmark/scenarios/storage/storagecapacity.py @@ -0,0 +1,133 @@ +############################################################################## +# Copyright (c) 2016 Huawei Technologies Co.,Ltd and other. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +import pkg_resources +import logging +import json + +import yardstick.ssh as ssh +from yardstick.benchmark.scenarios import base + +LOG = logging.getLogger(__name__) + + +class StorageCapacity(base.Scenario): + """Measure storage capacity and scale. + + Parameters: + test_type - specified whether to measure. + valid test type are disk_size, block_size, disk_utilization + type: string + unit: na + default: "disk_size" + interval - specified how ofter to stat disk utilization + type: int + unit: seconds + default: 1 + count - specified how many times to stat disk utilization + type: int + unit: na + default: 15 + + This scenario reads hardware specification, + disk size, block size and disk utilization. 
+ """ + __scenario_type__ = "StorageCapacity" + TARGET_SCRIPT = "storagecapacity.bash" + + def __init__(self, scenario_cfg, context_cfg): + self.scenario_cfg = scenario_cfg + self.context_cfg = context_cfg + self.setup_done = False + + def setup(self): + """scenario setup""" + self.target_script = pkg_resources.resource_filename( + "yardstick.benchmark.scenarios.storage", + StorageCapacity.TARGET_SCRIPT) + host = self.context_cfg['host'] + if host is None: + raise RuntimeError('No right node.Please check the configuration') + host_user = host.get('user', 'ubuntu') + host_ip = host.get('ip', None) + host_pwd = host.get('password', 'root') + LOG.debug("user:%s, host:%s", host_user, host_ip) + + self.client = ssh.SSH(host_user, host_ip, password=host_pwd) + self.client.wait(timeout=600) + + # copy script to host + self.client.run("cat > ~/storagecapacity.sh", + stdin=open(self.target_script, 'rb')) + + self.setup_done = True + + def _get_disk_utilization(self): + """Get disk utilization using iostat.""" + options = self.scenario_cfg["options"] + interval = options.get('interval', 1) + count = options.get('count', 15) + + cmd = "sudo iostat -dx %d %d | awk 'NF==14 && \ + $1 !~ /Device/ {print $1,$14}'" % (interval, count) + + LOG.debug("Executing command: %s", cmd) + status, stdout, stderr = self.client.execute(cmd) + if status: + raise RuntimeError(stderr) + + device_name_arr = [] + min_util_arr = [] + max_util_arr = [] + avg_util_arr = [] + for row in stdout.split('\n'): + kv = row.split(' ') + if len(kv) != 2: + continue + name = kv[0] + util = float(kv[1]) + if name not in device_name_arr: + device_name_arr.append(name) + min_util_arr.append(util) + max_util_arr.append(util) + avg_util_arr.append(util) + else: + i = device_name_arr.index(name) + min_util_arr[i] = min_util_arr[i] \ + if min_util_arr[i] < util else util + max_util_arr[i] = max_util_arr[i] \ + if max_util_arr[i] > util else util + avg_util_arr[i] += util + r = {} + for i in range(len(device_name_arr)): + r[device_name_arr[i]] = {"min_util": min_util_arr[i], + "max_util": max_util_arr[i], + "avg_util": avg_util_arr[i]/count} + return r + + def run(self, result): + """execute the benchmark""" + + if not self.setup_done: + self.setup() + + options = self.scenario_cfg["options"] + test_type = options.get('test_type', 'disk_size') + + if test_type == "disk_utilization": + r = self._get_disk_utilization() + result.update(r) + else: + cmd = "sudo bash storagecapacity.sh " + test_type + + LOG.debug("Executing command: %s", cmd) + status, stdout, stderr = self.client.execute(cmd) + if status: + raise RuntimeError(stderr) + + result.update(json.loads(stdout)) diff --git a/yardstick/cmd/commands/plugin.py b/yardstick/cmd/commands/plugin.py index 8e3ddb5a5..9936942d8 100644 --- a/yardstick/cmd/commands/plugin.py +++ b/yardstick/cmd/commands/plugin.py @@ -9,6 +9,7 @@ """ Handler for yardstick command 'plugin' """ +import os import sys import yaml import time @@ -40,8 +41,10 @@ class PluginCommands(object): plugin_name = plugins.get("name") print("Installing plugin: %s" % plugin_name) + LOG.info("Executing _install_setup()") self._install_setup(plugin_name, deployment) + LOG.info("Executing _run()") self._run(plugin_name) total_end_time = time.time() @@ -60,10 +63,12 @@ class PluginCommands(object): plugins, deployment = parser.parse_plugin() plugin_name = plugins.get("name") - print("Remove plugin: %s" % plugin_name) + print("Removing plugin: %s" % plugin_name) + LOG.info("Executing _remove_setup()") self._remove_setup(plugin_name, 
deployment) + LOG.info("Executing _run()") self._run(plugin_name) total_end_time = time.time() @@ -80,15 +85,25 @@ class PluginCommands(object): deployment_user = deployment.get("user") deployment_ip = deployment.get("ip") - deployment_password = deployment.get("password") - LOG.debug("user:%s, host:%s", deployment_user, deployment_ip) - self.client = ssh.SSH(deployment_user, deployment_ip, - password=deployment_password) - self.client.wait(timeout=600) + + if deployment_ip == "local": + installer_ip = os.environ.get("INSTALLER_IP", None) + + LOG.info("user:%s, host:%s", deployment_user, installer_ip) + self.client = ssh.SSH(deployment_user, installer_ip, + password=deployment_password) + self.client.wait(timeout=600) + else: + LOG.info("user:%s, host:%s", deployment_user, deployment_ip) + self.client = ssh.SSH(deployment_user, deployment_ip, + password=deployment_password) + self.client.wait(timeout=600) # copy script to host cmd = "cat > ~/%s.sh" % plugin_name + + LOG.info("copying script to host: %s", cmd) self.client.run(cmd, stdin=open(self.script, 'rb')) def _remove_setup(self, plugin_name, deployment): @@ -99,22 +114,32 @@ class PluginCommands(object): deployment_user = deployment.get("user") deployment_ip = deployment.get("ip") - deployment_password = deployment.get("password") - LOG.debug("user:%s, host:%s", deployment_user, deployment_ip) - self.client = ssh.SSH(deployment_user, deployment_ip, - password=deployment_password) - self.client.wait(timeout=600) + + if deployment_ip == "local": + installer_ip = os.environ.get("INSTALLER_IP", None) + + LOG.info("user:%s, host:%s", deployment_user, installer_ip) + self.client = ssh.SSH(deployment_user, installer_ip, + password=deployment_password) + self.client.wait(timeout=600) + else: + LOG.info("user:%s, host:%s", deployment_user, deployment_ip) + self.client = ssh.SSH(deployment_user, deployment_ip, + password=deployment_password) + self.client.wait(timeout=600) # copy script to host cmd = "cat > ~/%s.sh" % plugin_name + + LOG.info("copying script to host: %s", cmd) self.client.run(cmd, stdin=open(self.script, 'rb')) def _run(self, plugin_name): '''Run installation script ''' cmd = "sudo bash %s" % plugin_name + ".sh" - LOG.debug("Executing command: %s", cmd) + LOG.info("Executing command: %s", cmd) status, stdout, stderr = self.client.execute(cmd) |