21 files changed, 758 insertions, 337 deletions
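The dashboard and prox_binsearch changes below rename every per-iteration KPI to a tg__0.collect_stats.* field and tag each sample with a Status value ('Success', 'Failure' or 'Result') instead of emitting separate Success_*/Result_* keys. A minimal, illustrative sketch of the query shape the updated panels issue (plain Python; the field and measurement names are taken from the diff, while build_panel_query itself is a hypothetical helper and not part of the patch):

def build_panel_query(field, pkt_size=None, status="Result",
                      measurement="tc_prox_baremetal_l2fwd-4"):
    """Return the InfluxQL one updated dashboard panel would issue."""
    # Every panel now filters on the Status field written by prox_binsearch.
    where = ['"tg__0.collect_stats.Status" = \'%s\'' % status, "$timeFilter"]
    if pkt_size is not None:
        # Result panels additionally pin the packet size (64, 128, ... 1518).
        where.insert(0, '"tg__0.collect_stats.PktSize" = %d' % pkt_size)
    return ('SELECT mean("tg__0.collect_stats.%s") FROM "%s" WHERE %s '
            'GROUP BY time($__interval) fill(null)'
            % (field, measurement, " AND ".join(where)))

if __name__ == "__main__":
    # e.g. the "Actual throughput @ 64 byte" panel after this change
    print(build_panel_query("Actual_throughput", pkt_size=64))
    # the SUCCESS Rx Total panel filters on Status only, with no PktSize tag
    print(build_panel_query("rx_total", status="Success"))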
diff --git a/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json b/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json index 3c78ab18d..659522510 100644 --- a/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json +++ b/dashboard/Prox_BM_L2FWD-4Port_MultiSize-1518452496550.json @@ -44,6 +44,7 @@ "annotations": { "list": [] }, + "editMode": false, "editable": true, "gnetId": null, "graphTooltip": 0, @@ -149,7 +150,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.xe0.out_packets\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE $timeFilter GROUP BY time($interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.xe0.out_packets\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE $timeFilter GROUP BY time($interval) fill(null)", "rawQuery": false, "refId": "A", "resultFormat": "time_series", @@ -283,7 +284,7 @@ [ { "params": [ - "tg__0.xe0.in_packets" + "tg__0.collect_stats.xe0.in_packets" ], "type": "field" }, @@ -321,7 +322,7 @@ [ { "params": [ - "tg__0.xe1.in_packets" + "tg__0.collect_stats.xe1.in_packets" ], "type": "field" }, @@ -359,7 +360,7 @@ [ { "params": [ - "tg__0.xe2.in_packets" + "tg__0.collect_stats.xe2.in_packets" ], "type": "field" }, @@ -397,7 +398,7 @@ [ { "params": [ - "tg__0.xe3.in_packets" + "tg__0.collect_stats.xe3.in_packets" ], "type": "field" }, @@ -525,7 +526,7 @@ [ { "params": [ - "tg__0.xe0.out_packets" + "tg__0.collect_stats.xe0.out_packets" ], "type": "field" }, @@ -563,7 +564,7 @@ [ { "params": [ - "tg__0.xe1.out_packets" + "tg__0.collect_stats.xe1.out_packets" ], "type": "field" }, @@ -601,7 +602,7 @@ [ { "params": [ - "tg__0.xe2.out_packets" + "tg__0.collect_stats.xe2.out_packets" ], "type": "field" }, @@ -639,7 +640,7 @@ [ { "params": [ - "tg__0.xe3.out_packets" + "tg__0.collect_stats.xe3.out_packets" ], "type": "field" }, @@ -1060,7 +1061,7 @@ [ { "params": [ - "tg__0.TxThroughput" + "tg__0.collect_stats.TxThroughput" ], "type": "field" }, @@ -1099,7 +1100,7 @@ [ { "params": [ - "tg__0.RxThroughput" + "tg__0.collect_stats.RxThroughput" ], "type": "field" }, @@ -1225,7 +1226,7 @@ [ { "params": [ - "tg__0.PktSize" + "tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -1355,13 +1356,15 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", + "query": "SELECT mean(\"tg__0.collect_stats.tx_total\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.Status\" = 'Success' AND $timeFilter GROUP BY time($__interval) fill(none)", + "rawQuery": false, "refId": "B", "resultFormat": "time_series", "select": [ [ { "params": [ - "tg__0.Success_tx_total" + "tg__0.collect_stats.tx_total" ], "type": "field" }, @@ -1371,7 +1374,13 @@ } ] ], - "tags": [] + "tags": [ + { + "key": "tg__0.collect_stats.Status", + "operator": "=", + "value": "Success" + } + ] }, { "alias": "SUCCESS Rx Total", @@ -1393,13 +1402,15 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", + "query": "SELECT mean(\"tg__0.collect_stats.rx_total\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.Status\" = 'Success' AND $timeFilter GROUP BY time($__interval) fill(null)", + "rawQuery": false, "refId": "A", "resultFormat": "time_series", "select": [ [ { "params": [ - "tg__0.Success_rx_total" + "tg__0.collect_stats.rx_total" ], "type": "field" }, @@ -1409,7 +1420,13 @@ } ] ], - "tags": [] + "tags": [ + { + "key": "tg__0.collect_stats.Status", + "operator": "=", + "value": "Success" + } + ] }, { "alias": "SUCCESS ALLOWABLE 
LOST PACKETS", @@ -1431,13 +1448,15 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", + "query": "SELECT mean(\"tg__0.collect_stats.can_be_lost\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.Status\" = 'Success' AND $timeFilter GROUP BY time($__interval) fill(null)", + "rawQuery": false, "refId": "C", "resultFormat": "time_series", "select": [ [ { "params": [ - "tg__0.Success_can_be_lost" + "tg__0.collect_stats.can_be_lost" ], "type": "field" }, @@ -1447,7 +1466,13 @@ } ] ], - "tags": [] + "tags": [ + { + "key": "tg__0.collect_stats.Status", + "operator": "=", + "value": "Success" + } + ] } ], "thresholds": [], @@ -1567,7 +1592,7 @@ [ { "params": [ - "tg__0.duration" + "tg__0.collect_stats.duration" ], "type": "field" }, @@ -1675,7 +1700,7 @@ [ { "params": [ - "tg__0.test_duration" + "tg__0.collect_stats.test_duration" ], "type": "field" }, @@ -1783,7 +1808,7 @@ [ { "params": [ - "tg__0.test_precision" + "tg__0.collect_stats.test_precision" ], "type": "field" }, @@ -1891,7 +1916,7 @@ [ { "params": [ - "tg__0.tolerated_loss" + "tg__0.collect_stats.tolerated_loss" ], "type": "field" }, @@ -2048,7 +2073,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 64 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2056,7 +2081,7 @@ [ { "params": [ - "tg__0.Result_pktSize" + "tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -2068,7 +2093,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "64" } @@ -2165,7 +2190,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 64 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2173,7 +2198,7 @@ [ { "params": [ - "tg__0.Result_theor_max_throughput" + "tg__0.collect_stats.theor_max_throughput" ], "type": "field" }, @@ -2185,7 +2210,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "64" } @@ -2281,7 +2306,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 64 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 64 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2289,7 +2314,7 @@ [ { "params": [ - 
"tg__0.Result_Actual_throughput" + "tg__0.collect_stats.Actual_throughput" ], "type": "field" }, @@ -2301,7 +2326,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "64" } @@ -2409,7 +2434,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 128 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2417,7 +2442,7 @@ [ { "params": [ - "tg__0.Result_pktSize" + "tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -2429,7 +2454,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "128" } @@ -2525,7 +2550,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 128 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2533,7 +2558,7 @@ [ { "params": [ - "tg__0.Result_theor_max_throughput" + "tg__0.collect_stats.theor_max_throughput" ], "type": "field" }, @@ -2545,7 +2570,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "128" } @@ -2641,7 +2666,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 128 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 128 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2649,7 +2674,7 @@ [ { "params": [ - "tg__0.Result_Actual_throughput" + "tg__0.collect_stats.Actual_throughput" ], "type": "field" }, @@ -2661,7 +2686,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "128" } @@ -2768,7 +2793,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 256 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2776,7 +2801,7 @@ [ { "params": [ - "tg__0.Result_pktSize" + 
"tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -2788,7 +2813,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSiuze", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "256" } @@ -2883,7 +2908,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 256 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -2891,7 +2916,7 @@ [ { "params": [ - "tg__0.Result_theor_max_throughput" + "tg__0.collect_stats.theor_max_throughput" ], "type": "field" }, @@ -2903,7 +2928,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "256" } @@ -2998,7 +3023,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 256 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 256 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3006,7 +3031,7 @@ [ { "params": [ - "tg__0.Result_Actual_throughput" + "tg__0.collect_stats.Actual_throughput" ], "type": "field" }, @@ -3018,7 +3043,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "256" } @@ -3125,7 +3150,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 512 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3133,7 +3158,7 @@ [ { "params": [ - "tg__0.Result_pktSize" + "tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -3145,7 +3170,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "512" } @@ -3241,7 +3266,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 512 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3249,7 +3274,7 @@ [ { "params": [ - "tg__0.Result_theor_max_throughput" + 
"tg__0.collect_stats.theor_max_throughput" ], "type": "field" }, @@ -3261,7 +3286,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "512" } @@ -3357,7 +3382,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 512 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 512 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3365,7 +3390,7 @@ [ { "params": [ - "tg__0.Result_Actual_throughput" + "tg__0.collect_stats.Actual_throughput" ], "type": "field" }, @@ -3377,7 +3402,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "512" } @@ -3483,7 +3508,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1024 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3491,7 +3516,7 @@ [ { "params": [ - "tg__0.Result_pktSize" + "tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -3503,7 +3528,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "1024" } @@ -3598,7 +3623,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1024 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3606,7 +3631,7 @@ [ { "params": [ - "tg__0.Result_theor_max_throughput" + "tg__0.collect_stats.theor_max_throughput" ], "type": "field" }, @@ -3618,7 +3643,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "1024" } @@ -3713,7 +3738,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1024 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1024 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3721,7 +3746,7 @@ [ { "params": [ - "tg__0.Result_Actual_throughput" + 
"tg__0.collect_stats.Actual_throughput" ], "type": "field" }, @@ -3733,7 +3758,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "1024" } @@ -3840,7 +3865,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1280 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3848,7 +3873,7 @@ [ { "params": [ - "tg__0.Result_pktSize" + "tg__0.collect_stats.PktSize" ], "type": "field" }, @@ -3860,7 +3885,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "1280" } @@ -3956,7 +3981,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1280 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -3964,7 +3989,7 @@ [ { "params": [ - "tg__0.Result_theor_max_throughput" + "tg__0.collect_stats.theor_max_throughput" ], "type": "field" }, @@ -3976,7 +4001,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "1280" } @@ -4072,7 +4097,7 @@ "measurement": "tc_prox_baremetal_l2fwd-4", "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1280 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1280 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -4080,7 +4105,7 @@ [ { "params": [ - "tg__0.Result_Actual_throughput" + "tg__0.collect_stats.Actual_throughput" ], "type": "field" }, @@ -4092,7 +4117,7 @@ ], "tags": [ { - "key": "tg__0.Result_pktSize", + "key": "tg__0.collect_stats.PktSize", "operator": "=", "value": "1280" } @@ -4197,7 +4222,7 @@ ], "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_pktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.PktSize\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1518 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -4305,7 +4330,7 @@ ], "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_theor_max_throughput\") FROM 
\"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.theor_max_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1518 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -4413,7 +4438,7 @@ ], "orderByTime": "ASC", "policy": "default", - "query": "SELECT mean(\"tg__0.Result_Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.Result_pktSize\" = 1518 AND $timeFilter GROUP BY time($__interval) fill(null)", + "query": "SELECT mean(\"tg__0.collect_stats.Actual_throughput\") FROM \"tc_prox_baremetal_l2fwd-4\" WHERE \"tg__0.collect_stats.PktSize\" = 1518 AND \"tg__0.collect_stats.Status\" = 'Result' AND $timeFilter GROUP BY time($__interval) fill(null)", "rawQuery": true, "refId": "A", "resultFormat": "time_series", @@ -4541,7 +4566,7 @@ [ { "params": [ - "tg__0.LatencyAvg.5" + "tg__0.collect_stats.LatencyAvg.5" ], "type": "field" }, @@ -4579,7 +4604,7 @@ [ { "params": [ - "tg__0.LatencyMax.5" + "tg__0.collect_stats.LatencyMax.5" ], "type": "field" }, @@ -4687,7 +4712,7 @@ [ { "params": [ - "tg__0.LatencyAvg.6" + "tg__0.collect_stats.LatencyAvg.6" ], "type": "field" }, @@ -4725,7 +4750,7 @@ [ { "params": [ - "tg__0.LatencyMax.6" + "tg__0.collect_stats.LatencyMax.6" ], "type": "field" }, @@ -4833,7 +4858,7 @@ [ { "params": [ - "tg__0.LatencyAvg.7" + "tg__0.collect_stats.LatencyAvg.7" ], "type": "field" }, @@ -4871,7 +4896,7 @@ [ { "params": [ - "tg__0.LatencyMax.7" + "tg__0.collect_stats.LatencyMax.7" ], "type": "field" }, @@ -4979,7 +5004,7 @@ [ { "params": [ - "tg__0.LatencyAvg.8" + "tg__0.collect_stats.LatencyAvg.8" ], "type": "field" }, @@ -5017,7 +5042,7 @@ [ { "params": [ - "tg__0.LatencyMax.8" + "tg__0.collect_stats.LatencyMax.8" ], "type": "field" }, @@ -5783,8 +5808,8 @@ "list": [] }, "time": { - "from": "2018-02-12T15:17:27.733Z", - "to": "2018-02-12T16:44:28.270Z" + "from": "now-2d", + "to": "now" }, "timepicker": { "refresh_intervals": [ @@ -5813,5 +5838,5 @@ }, "timezone": "browser", "title": "Prox_BM_L2FWD-4Port_MultiSize", - "version": 29 -}
\ No newline at end of file + "version": 12 +} diff --git a/samples/ping-security-group.yaml b/samples/ping-security-group.yaml new file mode 100644 index 000000000..1545ee1cf --- /dev/null +++ b/samples/ping-security-group.yaml @@ -0,0 +1,74 @@ +############################################################################## +# Copyright (c) 2018 Huawei Technologies Co.,Ltd and others. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## +--- +# Sample ping test case using custom security group +# measure network latency using ping + +schema: "yardstick:task:0.1" + +{% set provider = provider or none %} +{% set physical_network = physical_network or 'physnet1' %} +{% set segmentation_id = segmentation_id or none %} +scenarios: +- + type: Ping + options: + packetsize: 200 + host: athena.demo + target: ares.demo + + runner: + type: Duration + duration: 60 + interval: 1 + + sla: + max_rtt: 10 + action: monitor + +context: + name: demo + image: yardstick-image + flavor: yardstick-flavor + user: ubuntu + security_group: + rules: + - remote_ip_prefix: "0.0.0.0/0" + protocol: "tcp" + port_range_min: 1 + port_range_max: 65535 + - remote_ip_prefix: "0.0.0.0/0" + protocol: "udp" + port_range_min: 1 + port_range_max: 65535 + - remote_ip_prefix: "0.0.0.0/0" + protocol: "icmp" + + placement_groups: + pgrp1: + policy: "availability" + + servers: + athena: + floating_ip: true + placement: "pgrp1" + ares: + placement: "pgrp1" + + + networks: + test: + cidr: '10.0.1.0/24' + {% if provider == "vlan" %} + provider: {{provider}} + physical_network: {{physical_network}} + {% if segmentation_id %} + segmentation_id: {{segmentation_id}} + {% endif %} + {% endif %} diff --git a/samples/ping_bottlenecks.yaml b/samples/ping_bottlenecks.yaml index 096d70e67..6a586cb90 100644 --- a/samples/ping_bottlenecks.yaml +++ b/samples/ping_bottlenecks.yaml @@ -49,6 +49,18 @@ contexts: hw:mem_page_size: "large" {% endif %} user: ubuntu + security_group: + rules: + - remote_ip_prefix: "0.0.0.0/0" + protocol: "tcp" + port_range_min: 1 + port_range_max: 65535 + - remote_ip_prefix: "0.0.0.0/0" + protocol: "udp" + port_range_min: 1 + port_range_max: 65535 + - remote_ip_prefix: "0.0.0.0/0" + protocol: "icmp" placement_groups: pgrp1: diff --git a/yardstick/benchmark/contexts/heat.py b/yardstick/benchmark/contexts/heat.py index f118ffc32..c3c5451bd 100644 --- a/yardstick/benchmark/contexts/heat.py +++ b/yardstick/benchmark/contexts/heat.py @@ -59,6 +59,7 @@ class HeatContext(Context): self.server_groups = [] self.keypair_name = None self.secgroup_name = None + self.security_group = None self._server_map = {} self.attrs = {} self._image = None @@ -118,8 +119,11 @@ class HeatContext(Context): return self.keypair_name = h_join(self.name, "key") + self.secgroup_name = h_join(self.name, "secgroup") + self.security_group = attrs.get("security_group") + self._image = attrs.get("image") self._flavor = attrs.get("flavor") @@ -185,7 +189,7 @@ class HeatContext(Context): self.flavors.add(flavor) template.add_keypair(self.keypair_name, self.name) - template.add_security_group(self.secgroup_name) + template.add_security_group(self.secgroup_name, self.security_group) for network in self.networks.values(): # Using existing network diff --git 
a/yardstick/benchmark/contexts/standalone/model.py b/yardstick/benchmark/contexts/standalone/model.py index ecddcbbe0..962cb48e2 100644 --- a/yardstick/benchmark/contexts/standalone/model.py +++ b/yardstick/benchmark/contexts/standalone/model.py @@ -89,6 +89,17 @@ VM_TEMPLATE = """ </devices> </domain> """ + +USER_DATA_TEMPLATE = """ +cat > {user_file} <<EOF +#cloud-config +preserve_hostname: false +hostname: {host} +users: +{user_config} +EOF +""" + WAIT_FOR_BOOT = 30 @@ -268,7 +279,7 @@ class Libvirt(object): return vm_image @classmethod - def build_vm_xml(cls, connection, flavor, vm_name, index): + def build_vm_xml(cls, connection, flavor, vm_name, index, cdrom_img): """Build the XML from the configuration parameters""" memory = flavor.get('ram', '4096') extra_spec = flavor.get('extra_specs', {}) @@ -293,6 +304,9 @@ class Libvirt(object): socket=socket, threads=threads, vm_image=image, cpuset=cpuset, cputune=cputune) + # Add CD-ROM device + vm_xml = Libvirt.add_cdrom(cdrom_img, vm_xml) + return vm_xml, mac @staticmethod @@ -320,6 +334,71 @@ class Libvirt(object): et = ET.ElementTree(element=root) et.write(file_name, encoding='utf-8', method='xml') + @classmethod + def add_cdrom(cls, file_path, xml_str): + """Add a CD-ROM disk XML node in 'devices' node + + <devices> + <disk type='file' device='cdrom'> + <driver name='qemu' type='raw'/> + <source file='/var/lib/libvirt/images/data.img'/> + <target dev='hdb'/> + <readonly/> + </disk> + ... + </devices> + """ + + root = ET.fromstring(xml_str) + device = root.find('devices') + + disk = ET.SubElement(device, 'disk') + disk.set('type', 'file') + disk.set('device', 'cdrom') + + driver = ET.SubElement(disk, 'driver') + driver.set('name', 'qemu') + driver.set('type', 'raw') + + source = ET.SubElement(disk, 'source') + source.set('file', file_path) + + target = ET.SubElement(disk, 'target') + target.set('dev', 'hdb') + + ET.SubElement(disk, 'readonly') + return ET.tostring(root) + + @staticmethod + def gen_cdrom_image(connection, file_path, vm_name, vm_user, key_filename): + """Generate ISO image for CD-ROM """ + + user_config = [" - name: {user_name}", + " ssh_authorized_keys:", + " - {pub_key_str}"] + if vm_user != "root": + user_config.append(" sudo: ALL=(ALL) NOPASSWD:ALL") + + meta_data = "/tmp/meta-data" + user_data = "/tmp/user-data" + with open(".".join([key_filename, "pub"]), "r") as pub_key_file: + pub_key_str = pub_key_file.read().rstrip() + user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=vm_user) + + cmd_lst = [ + "touch %s" % meta_data, + USER_DATA_TEMPLATE.format(user_file=user_data, host=vm_name, user_config=user_conf), + "genisoimage -output {0} -volid cidata -joliet -r {1} {2}".format(file_path, + meta_data, + user_data), + "rm {0} {1}".format(meta_data, user_data), + ] + for cmd in cmd_lst: + LOG.info(cmd) + status, _, error = connection.execute(cmd) + if status: + raise exceptions.LibvirtQemuImageCreateError(error=error) + class StandaloneContextHelper(object): """ This class handles all the common code for standalone @@ -331,7 +410,7 @@ class StandaloneContextHelper(object): @staticmethod def install_req_libs(connection, extra_pkgs=None): extra_pkgs = extra_pkgs or [] - pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping"] + pkgs = ["qemu-kvm", "libvirt-bin", "bridge-utils", "numactl", "fping", "genisoimage"] pkgs.extend(extra_pkgs) cmd_template = "dpkg-query -W --showformat='${Status}\\n' \"%s\"|grep 'ok installed'" for pkg in pkgs: @@ -457,6 +536,25 @@ class 
StandaloneContextHelper(object): node["ip"] = ip return nodes + @classmethod + def check_update_key(cls, connection, node, vm_name, id_name, cdrom_img): + # Generate public/private keys if private key file is not provided + user_name = node.get('user') + if not user_name: + node['user'] = 'root' + user_name = node.get('user') + if not node.get('key_filename'): + key_filename = ''.join( + [constants.YARDSTICK_ROOT_PATH, + 'yardstick/resources/files/yardstick_key-', + id_name]) + ssh.SSH.gen_keys(key_filename) + node['key_filename'] = key_filename + # Update image with public key + key_filename = node.get('key_filename') + Libvirt.gen_cdrom_image(connection, cdrom_img, vm_name, user_name, key_filename) + return node + class Server(object): """ This class handles geting vnf nodes diff --git a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py index 88ad598c3..5891f798e 100644 --- a/yardstick/benchmark/contexts/standalone/ovs_dpdk.py +++ b/yardstick/benchmark/contexts/standalone/ovs_dpdk.py @@ -394,13 +394,14 @@ class OvsDpdkContext(base.Context): for index, (key, vnf) in enumerate(collections.OrderedDict( self.servers).items()): cfg = '/tmp/vm_ovs_%d.xml' % index - vm_name = "vm_%d" % index + vm_name = "vm-%d" % index + cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index # 1. Check and delete VM if already exists model.Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection) xml_str, mac = model.Libvirt.build_vm_xml( - self.connection, self.vm_flavor, vm_name, index) + self.connection, self.vm_flavor, vm_name, index, cdrom_img) # 2: Cleanup already available VMs for vfs in [vfs for vfs_name, vfs in vnf["network_ports"].items() @@ -411,16 +412,24 @@ class OvsDpdkContext(base.Context): model.Libvirt.write_file(cfg, xml_str) self.connection.put(cfg, cfg) + node = self.vnf_node.generate_vnf_instance(self.vm_flavor, + self.networks, + self.host_mgmt.get('ip'), + key, vnf, mac) + # Generate public/private keys if password or private key file is not provided + node = model.StandaloneContextHelper.check_update_key(self.connection, + node, + vm_name, + self.name, + cdrom_img) + + # store vnf node details + nodes.append(node) + # NOTE: launch through libvirt LOG.info("virsh create ...") model.Libvirt.virsh_create_vm(self.connection, cfg) self.vm_names.append(vm_name) - # build vnf node details - nodes.append(self.vnf_node.generate_vnf_instance(self.vm_flavor, - self.networks, - self.host_mgmt.get('ip'), - key, vnf, mac)) - return nodes diff --git a/yardstick/benchmark/contexts/standalone/sriov.py b/yardstick/benchmark/contexts/standalone/sriov.py index 3da12a9a8..8d410b2f3 100644 --- a/yardstick/benchmark/contexts/standalone/sriov.py +++ b/yardstick/benchmark/contexts/standalone/sriov.py @@ -225,13 +225,14 @@ class SriovContext(base.Context): for index, (key, vnf) in enumerate(collections.OrderedDict( self.servers).items()): cfg = '/tmp/vm_sriov_%s.xml' % str(index) - vm_name = "vm_%s" % str(index) + vm_name = "vm-%s" % str(index) + cdrom_img = "/var/lib/libvirt/images/cdrom-%d.img" % index # 1. 
Check and delete VM if already exists model.Libvirt.check_if_vm_exists_and_delete(vm_name, self.connection) xml_str, mac = model.Libvirt.build_vm_xml( - self.connection, self.vm_flavor, vm_name, index) + self.connection, self.vm_flavor, vm_name, index, cdrom_img) # 2: Cleanup already available VMs network_ports = collections.OrderedDict( @@ -243,17 +244,26 @@ class SriovContext(base.Context): model.Libvirt.write_file(cfg, xml_str) self.connection.put(cfg, cfg) + node = self.vnf_node.generate_vnf_instance(self.vm_flavor, + self.networks, + self.host_mgmt.get('ip'), + key, vnf, mac) + # Generate public/private keys if password or private key file is not provided + node = model.StandaloneContextHelper.check_update_key(self.connection, + node, + vm_name, + self.name, + cdrom_img) + + # store vnf node details + nodes.append(node) + # NOTE: launch through libvirt LOG.info("virsh create ...") model.Libvirt.virsh_create_vm(self.connection, cfg) self.vm_names.append(vm_name) - # build vnf node details - nodes.append(self.vnf_node.generate_vnf_instance( - self.vm_flavor, self.networks, self.host_mgmt.get('ip'), - key, vnf, mac)) - return nodes def _get_vf_data(self, value, vfmac, pfif): diff --git a/yardstick/benchmark/scenarios/networking/vnf_generic.py b/yardstick/benchmark/scenarios/networking/vnf_generic.py index 10f10d4e6..4884e57a8 100644 --- a/yardstick/benchmark/scenarios/networking/vnf_generic.py +++ b/yardstick/benchmark/scenarios/networking/vnf_generic.py @@ -117,8 +117,12 @@ class NetworkServiceTestCase(scenario_base.Scenario): for index, dst_port in enumerate(fflow.get("dst_port", [])): flow["dst_port_{}".format(index)] = dst_port - flow["count"] = fflow["count"] - flow["seed"] = fflow["seed"] + if "count" in fflow: + flow["count"] = fflow["count"] + + if "seed" in fflow: + flow["seed"] = fflow["seed"] + except KeyError: flow = {} return {"flow": flow} diff --git a/yardstick/network_services/traffic_profile/ixia_rfc2544.py b/yardstick/network_services/traffic_profile/ixia_rfc2544.py index 760b1e8d3..461604fb8 100644 --- a/yardstick/network_services/traffic_profile/ixia_rfc2544.py +++ b/yardstick/network_services/traffic_profile/ixia_rfc2544.py @@ -96,7 +96,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): 'count': ip['count'], 'dscp': ip['dscp'], 'ttl': ip['ttl'], - 'seed': ip['seed'], + 'seed': ip.get('seed', 1), 'srcip': srcip, 'dstip': dstip, 'srcmask': srcmask, @@ -110,7 +110,7 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): 'srcportmask': src_port_mask, 'dstportmask': dst_port_mask, 'count': outer_l4['count'], - 'seed': outer_l4['seed'], + 'seed': outer_l4.get('seed', 1) } } @@ -179,10 +179,6 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): except ZeroDivisionError: LOG.info('No traffic is flowing') - samples['TxThroughput'] = tx_throughput - samples['RxThroughput'] = rx_throughput - samples['DropPercentage'] = drop_percent - if first_run: completed = True if drop_percent <= tolerance else False if (first_run and @@ -196,4 +192,21 @@ class IXIARFC2544Profile(trex_traffic_profile.TrexProfile): else: completed = True + latency_ns_avg = float( + sum([samples[iface]['Store-Forward_Avg_latency_ns'] + for iface in samples])) / num_ifaces + latency_ns_min = float( + sum([samples[iface]['Store-Forward_Min_latency_ns'] + for iface in samples])) / num_ifaces + latency_ns_max = float( + sum([samples[iface]['Store-Forward_Max_latency_ns'] + for iface in samples])) / num_ifaces + + samples['TxThroughput'] = tx_throughput + samples['RxThroughput'] = 
rx_throughput + samples['DropPercentage'] = drop_percent + samples['latency_ns_avg'] = latency_ns_avg + samples['latency_ns_min'] = latency_ns_min + samples['latency_ns_max'] = latency_ns_max + return completed, samples diff --git a/yardstick/network_services/traffic_profile/prox_binsearch.py b/yardstick/network_services/traffic_profile/prox_binsearch.py index 506a880e0..16a0411ec 100644 --- a/yardstick/network_services/traffic_profile/prox_binsearch.py +++ b/yardstick/network_services/traffic_profile/prox_binsearch.py @@ -25,6 +25,14 @@ from yardstick.common import constants as overall_constants LOG = logging.getLogger(__name__) +STATUS_SUCCESS = "Success" +STATUS_FAIL = "Failure" +STATUS_RESULT = "Result" +STEP_CONFIRM = "Confirm retry" +STEP_INCREASE_LOWER = "Increase lower" +STEP_DECREASE_LOWER = "Decrease lower" +STEP_DECREASE_UPPER = "Decrease upper" + class ProxBinSearchProfile(ProxProfile): """ @@ -85,18 +93,16 @@ class ProxBinSearchProfile(ProxProfile): # success, the binary search will complete on an integer multiple # of the precision, rather than on a fraction of it. - theor_max_thruput = actual_max_thruput = 0 + theor_max_thruput = 0 result_samples = {} - # Store one time only value in influxdb - single_samples = { + test_data = { "test_duration": traffic_gen.scenario_helper.scenario_cfg["runner"]["duration"], "test_precision": self.params["traffic_profile"]["test_precision"], "tolerated_loss": self.params["traffic_profile"]["tolerated_loss"], "duration": duration } - self.queue.put(single_samples) self.prev_time = time.time() # throughput and packet loss from the most recent successful test @@ -110,85 +116,88 @@ class ProxBinSearchProfile(ProxProfile): neg_retry = 0 total_retry = 0 - LOG.info("Checking MAX %s MIN %s TEST %s", - self.current_upper, self.lower_bound, test_value) + LOG.info("Checking MAX %s MIN %s TEST %s", self.current_upper, + self.lower_bound, test_value) + while (pos_retry <= ok_retry) and (neg_retry <= ok_retry): total_retry = total_retry + 1 + result, port_samples = self._profile_helper.run_test(pkt_size, duration, test_value, self.tolerated_loss, line_speed) - if (total_retry > (ok_retry * 3)) and (ok_retry is not 0): - LOG.info("Failure.!! .. RETRY EXCEEDED ... decrease lower bound") + if (total_retry > (ok_retry * 3)) and (ok_retry is not 0): + status = STATUS_FAIL + next_step = STEP_DECREASE_LOWER successful_pkt_loss = result.pkt_loss - samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples) - self.current_upper = test_value neg_retry = total_retry elif result.success: if (pos_retry < ok_retry) and (ok_retry is not 0): - neg_retry = 0 - LOG.info("Success! ... confirm retry") - + status = STATUS_SUCCESS + next_step = STEP_CONFIRM successful_pkt_loss = result.pkt_loss - samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples) - + neg_retry = 0 else: - LOG.info("Success! 
Increasing lower bound") + status = STATUS_SUCCESS + next_step = STEP_INCREASE_LOWER self.current_lower = test_value - successful_pkt_loss = result.pkt_loss - samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples) - - # store results with success tag in influxdb - success_samples = \ - {'Success_' + key: value for key, value in samples.items()} - - success_samples["Success_rx_total"] = int(result.rx_total) - success_samples["Success_tx_total"] = int(result.tx_total) - success_samples["Success_can_be_lost"] = int(result.can_be_lost) - success_samples["Success_drop_total"] = int(result.drop_total) - success_samples["Success_RxThroughput"] = samples["RxThroughput"] - success_samples["Success_RxThroughput_gbps"] = \ - (samples["RxThroughput"] / 1000) * ((pkt_size + 20)* 8) - LOG.info(">>>##>>Collect SUCCESS TG KPIs %s %s", - datetime.datetime.now(), success_samples) - self.queue.put(success_samples, True, overall_constants.QUEUE_PUT_TIMEOUT) - - # Store Actual throughput for result samples - actual_max_thruput = success_samples["Success_RxThroughput"] pos_retry = pos_retry + 1 else: if (neg_retry < ok_retry) and (ok_retry is not 0): - + status = STATUS_FAIL + next_step = STEP_CONFIRM pos_retry = 0 - LOG.info("failure! ... confirm retry") else: - LOG.info("Failure... Decreasing upper bound") + status = STATUS_FAIL + next_step = STEP_DECREASE_UPPER self.current_upper = test_value neg_retry = neg_retry + 1 - samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples) + + LOG.info( + "Status = '%s' Next_Step = '%s'", status, next_step) + + samples = result.get_samples(pkt_size, successful_pkt_loss, port_samples) if theor_max_thruput < samples["TxThroughput"]: theor_max_thruput = samples['TxThroughput'] - self.queue.put({'theor_max_throughput': theor_max_thruput}) - - LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples) + samples['theor_max_throughput'] = theor_max_thruput + + samples["rx_total"] = int(result.rx_total) + samples["tx_total"] = int(result.tx_total) + samples["can_be_lost"] = int(result.can_be_lost) + samples["drop_total"] = int(result.drop_total) + samples["RxThroughput_gbps"] = \ + (samples["RxThroughput"] / 1000) * ((pkt_size + 20) * 8) + samples['Status'] = status + samples['Next_Step'] = next_step samples["MAX_Rate"] = self.current_upper samples["MIN_Rate"] = self.current_lower samples["Test_Rate"] = test_value samples["Step_Id"] = step_id samples["Confirmation_Retry"] = total_retry + + samples.update(test_data) + + if status == STATUS_SUCCESS and next_step == STEP_INCREASE_LOWER: + # Store success samples for result samples + result_samples = samples + + LOG.info(">>>##>>Collect TG KPIs %s %s", datetime.datetime.now(), samples) + self.queue.put(samples, True, overall_constants.QUEUE_PUT_TIMEOUT) - LOG.info(">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s", - pkt_size, theor_max_thruput, actual_max_thruput) - result_samples["Result_pktSize"] = pkt_size - result_samples["Result_theor_max_throughput"] = theor_max_thruput - result_samples["Result_Actual_throughput"] = actual_max_thruput + LOG.info( + ">>>##>> Result Reached PktSize %s Theor_Max_Thruput %s Actual_throughput %s", + pkt_size, theor_max_thruput, result_samples.get("RxThroughput", 0)) + result_samples["Status"] = STATUS_RESULT + result_samples["Next_Step"] = "" + result_samples["Actual_throughput"] = result_samples.get("RxThroughput", 0) + result_samples["theor_max_throughput"] = theor_max_thruput self.queue.put(result_samples) diff --git 
a/yardstick/network_services/vnf_generic/vnf/tg_prox.py b/yardstick/network_services/vnf_generic/vnf/tg_prox.py index 854319a21..d12c42ec8 100644 --- a/yardstick/network_services/vnf_generic/vnf/tg_prox.py +++ b/yardstick/network_services/vnf_generic/vnf/tg_prox.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import - import logging +import copy from yardstick.network_services.utils import get_nsb_option from yardstick.network_services.vnf_generic.vnf.prox_vnf import ProxApproxVnf @@ -32,7 +31,9 @@ class ProxTrafficGen(SampleVNFTrafficGen): def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, resource_helper_type=None): - # don't call superclass, use custom wrapper of ProxApproxVnf + vnfd_cpy = copy.deepcopy(vnfd) + super(ProxTrafficGen, self).__init__(name, vnfd_cpy, task_id) + self._vnf_wrapper = ProxApproxVnf( name, vnfd, task_id, setup_env_helper_type, resource_helper_type) self.bin_path = get_nsb_option('bin_path', '') diff --git a/yardstick/orchestrator/heat.py b/yardstick/orchestrator/heat.py index 99a5760a3..9da4948dd 100644 --- a/yardstick/orchestrator/heat.py +++ b/yardstick/orchestrator/heat.py @@ -471,68 +471,77 @@ name (i.e. %s). 'value': {'get_resource': name} } - def add_security_group(self, name): + def add_security_group(self, name, security_group=None): """add to the template a Neutron SecurityGroup""" log.debug("adding Neutron::SecurityGroup '%s'", name) + description = ("Group allowing IPv4 and IPv6 for icmp and upd/tcp on" + "all ports") + rules = [ + {'remote_ip_prefix': '0.0.0.0/0', + 'protocol': 'tcp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'protocol': 'udp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'protocol': 'icmp'}, + {'remote_ip_prefix': '::/0', + 'ethertype': 'IPv6', + 'protocol': 'tcp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '::/0', + 'ethertype': 'IPv6', + 'protocol': 'udp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '::/0', + 'ethertype': 'IPv6', + 'protocol': 'ipv6-icmp'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'direction': 'egress', + 'protocol': 'tcp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'direction': 'egress', + 'protocol': 'udp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '0.0.0.0/0', + 'direction': 'egress', + 'protocol': 'icmp'}, + {'remote_ip_prefix': '::/0', + 'direction': 'egress', + 'ethertype': 'IPv6', + 'protocol': 'tcp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '::/0', + 'direction': 'egress', + 'ethertype': 'IPv6', + 'protocol': 'udp', + 'port_range_min': '1', + 'port_range_max': '65535'}, + {'remote_ip_prefix': '::/0', + 'direction': 'egress', + 'ethertype': 'IPv6', + 'protocol': 'ipv6-icmp'}, + ] + if security_group: + description = "Custom security group rules defined by the user" + rules = security_group.get('rules') + + log.debug("The security group rules is %s", rules) + self.resources[name] = { 'type': 'OS::Neutron::SecurityGroup', 'properties': { 'name': name, - 'description': "Group allowing IPv4 and IPv6 for icmp and upd/tcp on all ports", - 'rules': [ - {'remote_ip_prefix': '0.0.0.0/0', - 'protocol': 'tcp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '0.0.0.0/0', - 
'protocol': 'udp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '0.0.0.0/0', - 'protocol': 'icmp'}, - {'remote_ip_prefix': '::/0', - 'ethertype': 'IPv6', - 'protocol': 'tcp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '::/0', - 'ethertype': 'IPv6', - 'protocol': 'udp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '::/0', - 'ethertype': 'IPv6', - 'protocol': 'ipv6-icmp'}, - {'remote_ip_prefix': '0.0.0.0/0', - 'direction': 'egress', - 'protocol': 'tcp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '0.0.0.0/0', - 'direction': 'egress', - 'protocol': 'udp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '0.0.0.0/0', - 'direction': 'egress', - 'protocol': 'icmp'}, - {'remote_ip_prefix': '::/0', - 'direction': 'egress', - 'ethertype': 'IPv6', - 'protocol': 'tcp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '::/0', - 'direction': 'egress', - 'ethertype': 'IPv6', - 'protocol': 'udp', - 'port_range_min': '1', - 'port_range_max': '65535'}, - {'remote_ip_prefix': '::/0', - 'direction': 'egress', - 'ethertype': 'IPv6', - 'protocol': 'ipv6-icmp'}, - ] + 'description': description, + 'rules': rules } } diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py index 8ad581918..371e4ef36 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_model.py @@ -46,6 +46,16 @@ XML_SAMPLE_INTERFACE = """<?xml version="1.0"?> class ModelLibvirtTestCase(unittest.TestCase): + XML_STR = model.VM_TEMPLATE.format( + vm_name="vm_name", + random_uuid=uuid.uuid4(), + mac_addr="00:01:02:03:04:05", + memory=2048, vcpu=2, cpu=2, + numa_cpus=0 - 10, + socket=1, threads=1, + vm_image="/var/lib/libvirt/images/yardstick-nsb-image.img", + cpuset=2 - 10, cputune='') + def setUp(self): self.pci_address_str = '0001:04:03.2' self.pci_address = utils.PciAddress(self.pci_address_str) @@ -66,34 +76,34 @@ class ModelLibvirtTestCase(unittest.TestCase): ssh_mock.execute = mock.Mock(return_value=(0, "a", "")) ssh.return_value = ssh_mock # NOTE(ralonsoh): this test doesn't cover function execution. - model.Libvirt.check_if_vm_exists_and_delete("vm_0", ssh_mock) + model.Libvirt.check_if_vm_exists_and_delete('vm-0', ssh_mock) def test_virsh_create_vm(self): self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0)) - model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0') - self.mock_ssh.execute.assert_called_once_with('virsh create vm_0') + model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm-0') + self.mock_ssh.execute.assert_called_once_with('virsh create vm-0') def test_virsh_create_vm_error(self): self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_create')) with self.assertRaises(exceptions.LibvirtCreateError) as exc: - model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm_0') + model.Libvirt.virsh_create_vm(self.mock_ssh, 'vm-0') self.assertEqual('Error creating the virtual machine. 
Error: ' 'error_create.', str(exc.exception)) - self.mock_ssh.execute.assert_called_once_with('virsh create vm_0') + self.mock_ssh.execute.assert_called_once_with('virsh create vm-0') def test_virsh_destroy_vm(self): self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0)) - model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh) - self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0') + model.Libvirt.virsh_destroy_vm('vm-0', self.mock_ssh) + self.mock_ssh.execute.assert_called_once_with('virsh destroy vm-0') @mock.patch.object(model, 'LOG') def test_virsh_destroy_vm_error(self, mock_logger): self.mock_ssh.execute = mock.Mock(return_value=(1, 0, 'error_destroy')) mock_logger.warning = mock.Mock() - model.Libvirt.virsh_destroy_vm('vm_0', self.mock_ssh) + model.Libvirt.virsh_destroy_vm('vm-0', self.mock_ssh) mock_logger.warning.assert_called_once_with( - 'Error destroying VM %s. Error: %s', 'vm_0', 'error_destroy') - self.mock_ssh.execute.assert_called_once_with('virsh destroy vm_0') + 'Error destroying VM %s. Error: %s', 'vm-0', 'error_destroy') + self.mock_ssh.execute.assert_called_once_with('virsh destroy vm-0') def test_add_interface_address(self): xml = ElementTree.ElementTree( @@ -171,6 +181,56 @@ class ModelLibvirtTestCase(unittest.TestCase): self.assertEqual('0x' + vm_pci.split(':')[2].split('.')[1], interface_address.get('function')) + def test_add_cdrom(self): + xml_input = copy.deepcopy(XML_SAMPLE) + xml_output = model.Libvirt.add_cdrom('/var/lib/libvirt/images/data.img', xml_input) + + root = ElementTree.fromstring(xml_output) + et_out = ElementTree.ElementTree(element=root) + disk = et_out.find('devices').find('disk') + self.assertEqual('file', disk.get('type')) + self.assertEqual('cdrom', disk.get('device')) + driver = disk.find('driver') + self.assertEqual('qemu', driver.get('name')) + self.assertEqual('raw', driver.get('type')) + source = disk.find('source') + self.assertEqual('/var/lib/libvirt/images/data.img', source.get('file')) + target = disk.find('target') + self.assertEqual('hdb', target.get('dev')) + self.assertIsNotNone(disk.find('readonly')) + + def test_gen_cdrom_image(self): + self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0)) + root = ElementTree.fromstring(self.XML_STR) + hostname = root.find('name').text + meta_data = "/tmp/meta-data" + user_data = "/tmp/user-data" + file_path = "/tmp/cdrom-0.img" + key_filename = "id_rsa" + pub_key_str = "KEY" + user = 'root' + user_config = [" - name: {user_name}", + " ssh_authorized_keys:", + " - {pub_key_str}"] + + user_conf = os.linesep.join(user_config).format(pub_key_str=pub_key_str, user_name=user) + with mock.patch('six.moves.builtins.open', mock.mock_open(read_data=pub_key_str), + create=True) as mock_file: + with open(key_filename, "r") as h: + result = h.read() + model.Libvirt.gen_cdrom_image(self.mock_ssh, file_path, hostname, user, key_filename) + mock_file.assert_called_with(".".join([key_filename, "pub"]), "r") + self.assertEqual(result, pub_key_str) + + self.mock_ssh.execute.assert_has_calls([ + mock.call("touch %s" % meta_data), + mock.call(model.USER_DATA_TEMPLATE.format(user_file=user_data, host=hostname, + user_config=user_conf)), + mock.call("genisoimage -output {0} -volid cidata" + " -joliet -r {1} {2}".format(file_path, meta_data, user_data)), + mock.call("rm {0} {1}".format(meta_data, user_data)) + ]) + def test_create_snapshot_qemu(self): self.mock_ssh.execute = mock.Mock(return_value=(0, 0, 0)) index = 1 @@ -211,6 +271,19 @@ class ModelLibvirtTestCase(unittest.TestCase): 
self.mock_ssh.put_file.assert_called_once_with(base_image, '/tmp/base_image') + @mock.patch.object(model.Libvirt, 'gen_cdrom_image') + def test_check_update_key(self, mock_gen_cdrom_image): + node = {'user': 'defuser', 'key_filename': '/home/ubuntu/id_rsa'} + cdrom_img = "/var/lib/libvirt/images/data.img" + id_name = 'fake_name' + key_filename = node.get('key_filename') + root = ElementTree.fromstring(self.XML_STR) + hostname = root.find('name').text + model.StandaloneContextHelper.check_update_key(self.mock_ssh, node, hostname, id_name, + cdrom_img) + mock_gen_cdrom_image.assert_called_once_with(self.mock_ssh, cdrom_img, hostname, + node.get('user'), key_filename) + @mock.patch.object(os, 'access', return_value=False) def test_create_snapshot_qemu_no_image_local(self, mock_os_access): self.mock_ssh.execute = mock.Mock(side_effect=[(0, 0, 0), (1, 0, 0)]) @@ -253,18 +326,20 @@ class ModelLibvirtTestCase(unittest.TestCase): mac = model.StandaloneContextHelper.get_mac_address(0x00) _uuid = uuid.uuid4() connection = mock.Mock() + cdrom_img = '/tmp/cdrom-0.img' with mock.patch.object(model.StandaloneContextHelper, 'get_mac_address', return_value=mac) as \ mock_get_mac_address, \ mock.patch.object(uuid, 'uuid4', return_value=_uuid): xml_out, mac = model.Libvirt.build_vm_xml( - connection, flavor, 'vm_name', 100) + connection, flavor, 'vm_name', 100, cdrom_img) xml_ref = model.VM_TEMPLATE.format(vm_name='vm_name', random_uuid=_uuid, mac_addr=mac, memory='1024', vcpu='8', cpu='4', numa_cpus='0-7', socket='3', threads='2', vm_image='qemu_image', cpuset='4,5', cputune='cool') - self.assertEqual(xml_ref, xml_out) + xml_ref = model.Libvirt.add_cdrom(cdrom_img, xml_ref) + self.assertEqual(xml_out, xml_ref) mock_get_mac_address.assert_called_once_with(0x00) mock_create_snapshot_qemu.assert_called_once_with( connection, 100, 'images') @@ -296,6 +371,7 @@ class ModelLibvirtTestCase(unittest.TestCase): status = model.Libvirt.pin_vcpu_for_perf(ssh_mock, 4) self.assertIsNotNone(status) + class StandaloneContextHelperTestCase(unittest.TestCase): NODE_SAMPLE = "nodes_sample.yaml" @@ -463,7 +539,7 @@ class ServerTestCase(unittest.TestCase): } } status = self.server.generate_vnf_instance( - {}, self.NETWORKS, '1.1.1.1/24', 'vm_0', vnf, '00:00:00:00:00:01') + {}, self.NETWORKS, '1.1.1.1/24', 'vm-0', vnf, '00:00:00:00:00:01') self.assertIsNotNone(status) diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py index 69779d3e0..1a2407575 100644 --- a/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py +++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_ovs_dpdk.py @@ -231,8 +231,8 @@ class OvsDpdkContextTestCase(unittest.TestCase): def test_undeploy(self, mock_libvirt): self.ovs_dpdk.vm_deploy = True self.ovs_dpdk.connection = mock.Mock() - self.ovs_dpdk.vm_names = ['vm_0', 'vm_1'] - self.ovs_dpdk.drivers = ['vm_0', 'vm_1'] + self.ovs_dpdk.vm_names = ['vm-0', 'vm-1'] + self.ovs_dpdk.drivers = ['vm-0', 'vm-1'] self.ovs_dpdk.cleanup_ovs_dpdk_env = mock.Mock() self.ovs_dpdk.networks = self.NETWORKS self.ovs_dpdk.undeploy() @@ -370,7 +370,7 @@ class OvsDpdkContextTestCase(unittest.TestCase): ssh.return_value = ssh_mock self.ovs_dpdk.vm_deploy = True self.ovs_dpdk.connection = ssh_mock - self.ovs_dpdk.vm_names = ['vm_0', 'vm_1'] + self.ovs_dpdk.vm_names = ['vm-0', 'vm-1'] self.ovs_dpdk.drivers = [] self.ovs_dpdk.networks = self.NETWORKS self.ovs_dpdk.helper.get_mac_address = mock.Mock(return_value="") 
@@ -381,7 +381,7 @@ class OvsDpdkContextTestCase(unittest.TestCase):
     def test__enable_interfaces(self, mock_add_ovs_interface):
         self.ovs_dpdk.vm_deploy = True
         self.ovs_dpdk.connection = mock.Mock()
-        self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+        self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.networks = self.NETWORKS
         self.ovs_dpdk.ovs_properties = {'vpath': 'fake_path'}
@@ -391,15 +391,16 @@ class OvsDpdkContextTestCase(unittest.TestCase):
             'fake_path', 0, self.NETWORKS['private_0']['vpci'],
             self.NETWORKS['private_0']['mac'], 'test')

+    @mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
     @mock.patch.object(model.Libvirt, 'write_file')
     @mock.patch.object(model.Libvirt, 'build_vm_xml')
     @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
     @mock.patch.object(model.Libvirt, 'virsh_create_vm')
-    def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists,
-                                    mock_build_xml, mock_write_file):
+    def test_setup_ovs_dpdk_context(self, mock_create_vm, mock_check_if_exists, mock_build_xml,
+                                    mock_write_file, mock_check_update_key):
         self.ovs_dpdk.vm_deploy = True
         self.ovs_dpdk.connection = mock.Mock()
-        self.ovs_dpdk.vm_names = ['vm_0', 'vm_1']
+        self.ovs_dpdk.vm_names = ['vm-0', 'vm-1']
         self.ovs_dpdk.drivers = []
         self.ovs_dpdk.servers = {
             'vnf_0': {
@@ -413,23 +414,32 @@ class OvsDpdkContextTestCase(unittest.TestCase):
         self.ovs_dpdk.networks = self.NETWORKS
         self.ovs_dpdk.host_mgmt = {}
         self.ovs_dpdk.flavor = {}
+        self.ovs_dpdk.file_path = '/var/lib/libvirt/images/cdrom-0.img'
         self.ovs_dpdk.configure_nics_for_ovs_dpdk = mock.Mock(return_value="")
-        xml_str = mock.Mock()
+        self.ovs_dpdk._name_task_id = 'fake_name'
+        xml_str = 'vm-0'
         mock_build_xml.return_value = (xml_str, '00:00:00:00:00:01')
         self.ovs_dpdk._enable_interfaces = mock.Mock(return_value=xml_str)
         vnf_instance = mock.Mock()
+        vnf_instance_2 = mock.Mock()
+        mock_check_update_key.return_value = vnf_instance_2
         self.ovs_dpdk.vnf_node.generate_vnf_instance = mock.Mock(
             return_value=vnf_instance)

-        self.assertEqual([vnf_instance],
+        self.assertEqual([vnf_instance_2],
                          self.ovs_dpdk.setup_ovs_dpdk_context())
         mock_create_vm.assert_called_once_with(
             self.ovs_dpdk.connection, '/tmp/vm_ovs_0.xml')
         mock_check_if_exists.assert_called_once_with(
-            'vm_0', self.ovs_dpdk.connection)
+            'vm-0', self.ovs_dpdk.connection)
         mock_build_xml.assert_called_once_with(
-            self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm_0', 0)
+            self.ovs_dpdk.connection, self.ovs_dpdk.vm_flavor, 'vm-0', 0, self.ovs_dpdk.file_path)
         mock_write_file.assert_called_once_with('/tmp/vm_ovs_0.xml', xml_str)
+        mock_check_update_key.assert_called_once_with(self.ovs_dpdk.connection,
+                                                      vnf_instance,
+                                                      xml_str,
+                                                      self.ovs_dpdk._name_task_id,
+                                                      self.ovs_dpdk.file_path)

     @mock.patch.object(io, 'BytesIO')
     def test__check_hugepages(self, mock_bytesio):
diff --git a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
index 74c31569c..ae8e95f9a 100644
--- a/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
+++ b/yardstick/tests/unit/benchmark/contexts/standalone/test_sriov.py
@@ -113,8 +113,8 @@ class SriovContextTestCase(unittest.TestCase):
         self.sriov.vm_deploy = True
         self.sriov.connection = mock_ssh
-        self.sriov.vm_names = ['vm_0', 'vm_1']
-        self.sriov.drivers = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
+        self.sriov.drivers = ['vm-0', 'vm-1']
         self.assertIsNone(self.sriov.undeploy())

     def _get_file_abspath(self, filename):
@@ -254,7 +254,7 @@ class SriovContextTestCase(unittest.TestCase):
         ssh.return_value = ssh_mock
         self.sriov.vm_deploy = True
         self.sriov.connection = ssh_mock
-        self.sriov.vm_names = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
         self.sriov.drivers = []
         self.sriov.networks = self.NETWORKS
         self.sriov.helper.get_mac_address = mock.Mock(return_value="")
@@ -267,7 +267,7 @@ class SriovContextTestCase(unittest.TestCase):
     def test__enable_interfaces(self, mock_add_sriov, mock_ssh):
         self.sriov.vm_deploy = True
         self.sriov.connection = mock_ssh
-        self.sriov.vm_names = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
         self.sriov.drivers = []
         self.sriov.networks = self.NETWORKS
         self.assertEqual(
@@ -276,12 +276,13 @@ class SriovContextTestCase(unittest.TestCase):
         mock_add_sriov.assert_called_once_with(
             '0000:00:0a.0', 0, self.NETWORKS['private_0']['mac'], 'test')

+    @mock.patch.object(model.StandaloneContextHelper, 'check_update_key')
     @mock.patch.object(model.Libvirt, 'build_vm_xml')
     @mock.patch.object(model.Libvirt, 'check_if_vm_exists_and_delete')
     @mock.patch.object(model.Libvirt, 'write_file')
     @mock.patch.object(model.Libvirt, 'virsh_create_vm')
-    def test_setup_sriov_context(self, mock_create_vm, mock_write_file,
-                                 mock_check, mock_build_vm_xml):
+    def test_setup_sriov_context(self, mock_create_vm, mock_write_file, mock_check,
+                                 mock_build_vm_xml, mock_check_update_key):
         self.sriov.servers = {
             'vnf_0': {
                 'network_ports': {
@@ -297,24 +298,29 @@ class SriovContextTestCase(unittest.TestCase):
         self.sriov.vm_flavor = 'flavor'
         self.sriov.networks = 'networks'
         self.sriov.configure_nics_for_sriov = mock.Mock()
+        self.sriov._name_task_id = 'fake_name'
         cfg = '/tmp/vm_sriov_0.xml'
-        vm_name = 'vm_0'
+        vm_name = 'vm-0'
         xml_out = mock.Mock()
         mock_build_vm_xml.return_value = (xml_out, '00:00:00:00:00:01')
+        mock_check_update_key.return_value = 'node_2'
+        cdrom_img = '/var/lib/libvirt/images/cdrom-0.img'

         with mock.patch.object(self.sriov, 'vnf_node') as mock_vnf_node, \
                 mock.patch.object(self.sriov, '_enable_interfaces') as \
                 mock_enable_interfaces:
             mock_enable_interfaces.return_value = 'out_xml'
             mock_vnf_node.generate_vnf_instance = mock.Mock(
-                return_value='node')
+                return_value='node_1')
             nodes_out = self.sriov.setup_sriov_context()
-        self.assertEqual(['node'], nodes_out)
+        mock_check_update_key.assert_called_once_with(connection, 'node_1', vm_name,
+                                                      self.sriov._name_task_id, cdrom_img)
+        self.assertEqual(['node_2'], nodes_out)
         mock_vnf_node.generate_vnf_instance.assert_called_once_with(
             'flavor', 'networks', '1.2.3.4', 'vnf_0',
             self.sriov.servers['vnf_0'], '00:00:00:00:00:01')
         mock_build_vm_xml.assert_called_once_with(
-            connection, 'flavor', vm_name, 0)
+            connection, 'flavor', vm_name, 0, cdrom_img)
         mock_create_vm.assert_called_once_with(connection, cfg)
         mock_check.assert_called_once_with(vm_name, connection)
         mock_write_file.assert_called_once_with(cfg, 'out_xml')
@@ -332,7 +338,7 @@ class SriovContextTestCase(unittest.TestCase):
         ssh.return_value = ssh_mock
         self.sriov.vm_deploy = True
         self.sriov.connection = ssh_mock
-        self.sriov.vm_names = ['vm_0', 'vm_1']
+        self.sriov.vm_names = ['vm-0', 'vm-1']
         self.sriov.drivers = []
         self.sriov.servers = {
             'vnf_0': {
diff --git a/yardstick/tests/unit/benchmark/contexts/test_heat.py b/yardstick/tests/unit/benchmark/contexts/test_heat.py
index 7782d96bd..3ccae44c7 100644
--- a/yardstick/tests/unit/benchmark/contexts/test_heat.py
+++ b/yardstick/tests/unit/benchmark/contexts/test_heat.py
@@ -73,6 +73,7 @@ class HeatContextTestCase(unittest.TestCase):
         self.assertEqual(self.test_context.server_groups, [])
         self.assertIsNone(self.test_context.keypair_name)
         self.assertIsNone(self.test_context.secgroup_name)
+        self.assertIsNone(self.test_context.security_group)
         self.assertEqual(self.test_context._server_map, {})
         self.assertIsNone(self.test_context._image)
         self.assertIsNone(self.test_context._flavor)
@@ -192,7 +193,7 @@ class HeatContextTestCase(unittest.TestCase):
         mock_template.add_keypair.assert_called_with(
             "ctx-key",
             "ctx-12345678")
-        mock_template.add_security_group.assert_called_with("ctx-secgroup")
+        mock_template.add_security_group.assert_called_with("ctx-secgroup", None)
         mock_template.add_network.assert_called_with(
             "ctx-12345678-mynet", 'physnet1', None, None, None, None)
         mock_template.add_router.assert_called_with(
diff --git a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
index 49578b383..6bf2f2c2f 100644
--- a/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
+++ b/yardstick/tests/unit/benchmark/scenarios/networking/test_vnf_generic.py
@@ -405,7 +405,6 @@ class TestNetworkServiceTestCase(unittest.TestCase):
     def test___get_traffic_flow(self):
         self.scenario_cfg["traffic_options"]["flow"] = \
             self._get_file_abspath("ipv4_1flow_Packets_vpe.yaml")
-        self.scenario_cfg["options"] = {}
         self.scenario_cfg['options'] = {
             'flow': {
                 'src_ip': [
@@ -421,11 +420,10 @@ class TestNetworkServiceTestCase(unittest.TestCase):
                 'public_ip': ['1.1.1.1'],
             },
         }
-        # NOTE(ralonsoh): check the expected output. This test could be
-        # incorrect
-        # result = {'flow': {'dst_ip0': '152.16.40.2-152.16.40.254',
-        #                    'src_ip0': '152.16.100.2-152.16.100.254'}}
-        self.assertEqual({'flow': {}}, self.s._get_traffic_flow())
+        expected_flow = {'flow': {'dst_ip_0': '152.16.40.2-152.16.40.254',
+                                  'public_ip_0': '1.1.1.1',
+                                  'src_ip_0': '152.16.100.2-152.16.100.254'}}
+        self.assertEqual(expected_flow, self.s._get_traffic_flow())

     def test___get_traffic_flow_error(self):
         self.scenario_cfg["traffic_options"]["flow"] = \
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
index 27ab4607b..0759ecebd 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_ixia_rfc2544.py
@@ -575,9 +575,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
     def test_get_drop_percentage_completed(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 23,
+                        'Store-Forward_Min_latency_ns': 13,
+                        'Store-Forward_Max_latency_ns': 28}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         completed, samples = rfc2544_profile.get_drop_percentage(samples, 0, 1)
@@ -585,12 +591,21 @@ class TestIXIARFC2544Profile(unittest.TestCase):
         self.assertEqual(66.9, samples['TxThroughput'])
         self.assertEqual(66.833, samples['RxThroughput'])
         self.assertEqual(0.099651, samples['DropPercentage'])
+        self.assertEqual(21.5, samples['latency_ns_avg'])
+        self.assertEqual(14.0, samples['latency_ns_min'])
+        self.assertEqual(26.5, samples['latency_ns_max'])

     def test_get_drop_percentage_over_drop_percentage(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         rfc2544_profile.rate = 1000
@@ -604,9 +619,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
     def test_get_drop_percentage_under_drop_percentage(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         rfc2544_profile.rate = 1000
@@ -621,9 +642,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
     @mock.patch.object(ixia_rfc2544.LOG, 'info')
     def test_get_drop_percentage_not_flow(self, *args):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 0},
+                       {'in_packets': 1000, 'out_packets': 0,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 0}
+                       {'in_packets': 1005, 'out_packets': 0,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         rfc2544_profile.rate = 1000
@@ -637,9 +664,15 @@ class TestIXIARFC2544Profile(unittest.TestCase):
     def test_get_drop_percentage_first_run(self):
         samples = {'iface_name_1':
-                       {'in_packets': 1000, 'out_packets': 1000},
+                       {'in_packets': 1000, 'out_packets': 1000,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25},
                    'iface_name_2':
-                       {'in_packets': 1005, 'out_packets': 1007}
+                       {'in_packets': 1005, 'out_packets': 1007,
+                        'Store-Forward_Avg_latency_ns': 20,
+                        'Store-Forward_Min_latency_ns': 15,
+                        'Store-Forward_Max_latency_ns': 25}
                    }
         rfc2544_profile = ixia_rfc2544.IXIARFC2544Profile(self.TRAFFIC_PROFILE)
         completed, samples = rfc2544_profile.get_drop_percentage(
diff --git a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
index c062308e8..c09903377 100644
--- a/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
+++ b/yardstick/tests/unit/network_services/traffic_profile/test_prox_binsearch.py
@@ -71,38 +71,43 @@ class TestProxBinSearchProfile(unittest.TestCase):
         self.assertEqual(len(runs), 77)

         # Result Samples inc theor_max
-        result_tuple = {'Result_Actual_throughput': 5e-07,
-                        'Result_theor_max_throughput': 7.5e-07,
-                        'Result_pktSize': 200}
-
-        profile.queue.put.assert_called_with(result_tuple)
-
-        success_result_tuple = {"Success_CurrentDropPackets": 0.5,
-                                "Success_DropPackets": 0.5,
-                                "Success_LatencyAvg": 5.3,
-                                "Success_LatencyMax": 5.2,
-                                "Success_LatencyMin": 5.1,
-                                "Success_PktSize": 200,
-                                "Success_RxThroughput": 7.5e-07,
-                                "Success_Throughput": 7.5e-07,
-                                "Success_TxThroughput": 0.00012340000000000002}
+        result_tuple = {'Actual_throughput': 5e-07,
+                        'theor_max_throughput': 7.5e-07,
+                        'PktSize': 200,
+                        'Status': 'Result'}
+
+        test_results = profile.queue.put.call_args[0]
+        for k in result_tuple:
+            self.assertEqual(result_tuple[k], test_results[0][k])
+
+        success_result_tuple = {"CurrentDropPackets": 0.5,
+                                "DropPackets": 0.5,
+                                "LatencyAvg": 5.3,
+                                "LatencyMax": 5.2,
+                                "LatencyMin": 5.1,
+                                "PktSize": 200,
+                                "RxThroughput": 7.5e-07,
+                                "Throughput": 7.5e-07,
+                                "TxThroughput": 0.00012340000000000002,
+                                "Status": 'Success'}

         calls = profile.queue.put(success_result_tuple)
         profile.queue.put.assert_has_calls(calls)

-        success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
-                                 "Success_DropPackets": 0.5,
-                                 "Success_LatencyAvg": 5.3,
-                                 "Success_LatencyMax": 5.2,
-                                 "Success_LatencyMin": 5.1,
-                                 "Success_PktSize": 200,
-                                 "Success_RxThroughput": 7.5e-07,
-                                 "Success_Throughput": 7.5e-07,
-                                 "Success_TxThroughput": 123.4,
-                                 "Success_can_be_lost": 409600,
-                                 "Success_drop_total": 20480,
-                                 "Success_rx_total": 4075520,
-                                 "Success_tx_total": 4096000}
+        success_result_tuple2 = {"CurrentDropPackets": 0.5,
+                                 "DropPackets": 0.5,
+                                 "LatencyAvg": 5.3,
+                                 "LatencyMax": 5.2,
+                                 "LatencyMin": 5.1,
+                                 "PktSize": 200,
+                                 "RxThroughput": 7.5e-07,
+                                 "Throughput": 7.5e-07,
+                                 "TxThroughput": 123.4,
+                                 "can_be_lost": 409600,
+                                 "drop_total": 20480,
+                                 "rx_total": 4075520,
+                                 "tx_total": 4096000,
+                                 "Status": 'Success'}

         calls = profile.queue.put(success_result_tuple2)
         profile.queue.put.assert_has_calls(calls)
@@ -183,17 +188,16 @@ class TestProxBinSearchProfile(unittest.TestCase):

         # Result Samples
-        result_tuple = {'Result_Actual_throughput': 0, "Result_theor_max_throughput": 0,
-                        "Result_pktSize": 200}
+        result_tuple = {'Actual_throughput': 0, 'theor_max_throughput': 0,
+                        "Status": 'Result', "Next_Step": ''}
         profile.queue.put.assert_called_with(result_tuple)

         # Check for success_ tuple (None expected)
         calls = profile.queue.put.mock_calls
         for call in calls:
             for call_detail in call[1]:
-                for k in call_detail:
-                    if "Success_" in k:
-                        self.assertRaises(AttributeError)
+                if call_detail["Status"] == 'Success':
+                    self.assertRaises(AttributeError)

     def test_execute_4(self):
@@ -237,38 +241,43 @@ class TestProxBinSearchProfile(unittest.TestCase):
         self.assertEqual(len(runs), 7)

         # Result Samples inc theor_max
-        result_tuple = {'Result_Actual_throughput': 5e-07,
-                        'Result_theor_max_throughput': 7.5e-07,
-                        'Result_pktSize': 200}
-
-        profile.queue.put.assert_called_with(result_tuple)
-
-        success_result_tuple = {"Success_CurrentDropPackets": 0.5,
-                                "Success_DropPackets": 0.5,
-                                "Success_LatencyAvg": 5.3,
-                                "Success_LatencyMax": 5.2,
-                                "Success_LatencyMin": 5.1,
-                                "Success_PktSize": 200,
-                                "Success_RxThroughput": 7.5e-07,
-                                "Success_Throughput": 7.5e-07,
-                                "Success_TxThroughput": 0.00012340000000000002}
+        result_tuple = {'Actual_throughput': 5e-07,
+                        'theor_max_throughput': 7.5e-07,
+                        'PktSize': 200,
+                        "Status": 'Result'}
+
+        test_results = profile.queue.put.call_args[0]
+        for k in result_tuple:
+            self.assertEqual(result_tuple[k], test_results[0][k])
+
+        success_result_tuple = {"CurrentDropPackets": 0.5,
+                                "DropPackets": 0.5,
+                                "LatencyAvg": 5.3,
+                                "LatencyMax": 5.2,
+                                "LatencyMin": 5.1,
+                                "PktSize": 200,
+                                "RxThroughput": 7.5e-07,
+                                "Throughput": 7.5e-07,
+                                "TxThroughput": 0.00012340000000000002,
+                                "Status": 'Success'}

         calls = profile.queue.put(success_result_tuple)
         profile.queue.put.assert_has_calls(calls)

-        success_result_tuple2 = {"Success_CurrentDropPackets": 0.5,
-                                 "Success_DropPackets": 0.5,
-                                 "Success_LatencyAvg": 5.3,
-                                 "Success_LatencyMax": 5.2,
-                                 "Success_LatencyMin": 5.1,
-                                 "Success_PktSize": 200,
-                                 "Success_RxThroughput": 7.5e-07,
-                                 "Success_Throughput": 7.5e-07,
-                                 "Success_TxThroughput": 123.4,
-                                 "Success_can_be_lost": 409600,
-                                 "Success_drop_total": 20480,
-                                 "Success_rx_total": 4075520,
-                                 "Success_tx_total": 4096000}
+        success_result_tuple2 = {"CurrentDropPackets": 0.5,
+                                 "DropPackets": 0.5,
+                                 "LatencyAvg": 5.3,
+                                 "LatencyMax": 5.2,
+                                 "LatencyMin": 5.1,
+                                 "PktSize": 200,
+                                 "RxThroughput": 7.5e-07,
+                                 "Throughput": 7.5e-07,
+                                 "TxThroughput": 123.4,
+                                 "can_be_lost": 409600,
+                                 "drop_total": 20480,
+                                 "rx_total": 4075520,
+                                 "tx_total": 4096000,
+                                 "Status": 'Success'}

         calls = profile.queue.put(success_result_tuple2)
         profile.queue.put.assert_has_calls(calls)
diff --git a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
index 5ad182f22..a7e61da0f 100644
--- a/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
+++ b/yardstick/tests/unit/network_services/vnf_generic/vnf/test_tg_prox.py
@@ -317,6 +317,7 @@ class TestProxTrafficGen(unittest.TestCase):
         prox_traffic_gen = ProxTrafficGen(NAME, self.VNFD0, 'task_id')
         self.assertIsNone(prox_traffic_gen._tg_process)
         self.assertIsNone(prox_traffic_gen._traffic_process)
+        self.assertIsNone(prox_traffic_gen._mq_producer)

     @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
     @mock.patch(SSH_HELPER)
diff --git a/yardstick/tests/unit/orchestrator/test_heat.py b/yardstick/tests/unit/orchestrator/test_heat.py
index 3ec59a3c2..2e60a72cb 100644
--- a/yardstick/tests/unit/orchestrator/test_heat.py
+++ b/yardstick/tests/unit/orchestrator/test_heat.py
@@ -256,6 +256,25 @@ class HeatTemplateTestCase(unittest.TestCase):
         self.assertEqual(self.template.resources['some-server-group'][
                              'properties']['policies'], ['anti-affinity'])

+    def test_add_security_group(self):
+        security_group = {
+            'rules': [
+                {'remote_ip_prefix': '0.0.0.0/0',
+                 'port_range_max': 65535,
+                 'port_range_min': 1,
+                 'protocol': 'custom'},
+            ]
+        }
+        self.template.add_security_group('some-security-group', security_group)
+
+        secgroup_rsc = self.template.resources['some-security-group']
+
+        self.assertEqual(secgroup_rsc['type'], "OS::Neutron::SecurityGroup")
+        self.assertEqual(secgroup_rsc['properties']['description'],
+                         "Custom security group rules defined by the user")
+        self.assertEqual(secgroup_rsc['properties']['rules'][0]['protocol'],
+                         'custom')
+
     def test__add_resources_to_template_raw(self):
         test_context = node.NodeContext()
         self.addCleanup(test_context._delete_context)