author     RajithaY <rajithax.yerrumsetty@intel.com>    2017-04-25 03:31:15 -0700
committer  Rajitha Yerrumchetty <rajithax.yerrumsetty@intel.com>    2017-05-22 06:48:08 +0000
commit     bb756eebdac6fd24e8919e2c43f7d2c8c4091f59 (patch)
tree       ca11e03542edf2d8f631efeca5e1626d211107e3 /qemu/tests/qemu-iotests/136
parent     a14b48d18a9ed03ec191cf16b162206998a895ce (diff)
Adding qemu as a submodule of KVMFORNFV
This patch adds qemu as a submodule of the kvmfornfv repo so that the latest qemu is used for the execution of all test cases.

Change-Id: I1280af507a857675c7f81d30c95255635667bdd7
Signed-off-by: RajithaY <rajithax.yerrumsetty@intel.com>
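For context, a change of this kind is normally produced with git's submodule tooling, which removes the in-tree copy of qemu (including tests such as the one deleted below) and records a gitlink plus an entry in .gitmodules. A minimal sketch of that workflow is shown below; the remote URL is a placeholder and is not taken from this commit:

    # Hypothetical commands; <qemu-remote-url> is a placeholder, not part of this diff
    git rm -r qemu
    git submodule add <qemu-remote-url> qemu
    git commit -s -m "Adding qemu as a submodule of KVMFORNFV"

    # Resulting .gitmodules entry (path matches this repo's layout; URL is a placeholder)
    [submodule "qemu"]
        path = qemu
        url = <qemu-remote-url>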
Diffstat (limited to 'qemu/tests/qemu-iotests/136')
-rw-r--r--  qemu/tests/qemu-iotests/136 | 349
1 file changed, 0 insertions, 349 deletions
diff --git a/qemu/tests/qemu-iotests/136 b/qemu/tests/qemu-iotests/136
deleted file mode 100644
index e8c6937fc..000000000
--- a/qemu/tests/qemu-iotests/136
+++ /dev/null
@@ -1,349 +0,0 @@
-#!/usr/bin/env python
-#
-# Tests for block device statistics
-#
-# Copyright (C) 2015 Igalia, S.L.
-# Author: Alberto Garcia <berto@igalia.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import iotests
-import os
-
-interval_length = 10
-nsec_per_sec = 1000000000
-op_latency = nsec_per_sec / 1000 # See qtest_latency_ns in accounting.c
-bad_sector = 8192
-bad_offset = bad_sector * 512
-blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')
-
-class BlockDeviceStatsTestCase(iotests.QMPTestCase):
-    test_img = "null-aio://"
-    total_rd_bytes = 0
-    total_rd_ops = 0
-    total_wr_bytes = 0
-    total_wr_ops = 0
-    total_wr_merged = 0
-    total_flush_ops = 0
-    failed_rd_ops = 0
-    failed_wr_ops = 0
-    invalid_rd_ops = 0
-    invalid_wr_ops = 0
-    wr_highest_offset = 0
-    account_invalid = False
-    account_failed = False
-
-    def blockstats(self, device):
-        result = self.vm.qmp("query-blockstats")
-        for r in result['return']:
-            if r['device'] == device:
-                return r['stats']
-        raise Exception("Device not found for blockstats: %s" % device)
-
-    def create_blkdebug_file(self):
-        file = open(blkdebug_file, 'w')
-        file.write('''
-[inject-error]
-event = "read_aio"
-errno = "5"
-sector = "%d"
-
-[inject-error]
-event = "write_aio"
-errno = "5"
-sector = "%d"
-''' % (bad_sector, bad_sector))
-        file.close()
-
-    def setUp(self):
-        drive_args = []
-        drive_args.append("stats-intervals.0=%d" % interval_length)
-        drive_args.append("stats-account-invalid=%s" %
-                          (self.account_invalid and "on" or "off"))
-        drive_args.append("stats-account-failed=%s" %
-                          (self.account_failed and "on" or "off"))
-        self.create_blkdebug_file()
-        self.vm = iotests.VM().add_drive('blkdebug:%s:%s ' %
-                                         (blkdebug_file, self.test_img),
-                                         ','.join(drive_args))
-        self.vm.launch()
-        # Set an initial value for the clock
-        self.vm.qtest("clock_step %d" % nsec_per_sec)
-
-    def tearDown(self):
-        self.vm.shutdown()
-        os.remove(blkdebug_file)
-
-    def accounted_ops(self, read = False, write = False, flush = False):
-        ops = 0
-        if write:
-            ops += self.total_wr_ops
-            if self.account_failed:
-                ops += self.failed_wr_ops
-            if self.account_invalid:
-                ops += self.invalid_wr_ops
-        if read:
-            ops += self.total_rd_ops
-            if self.account_failed:
-                ops += self.failed_rd_ops
-            if self.account_invalid:
-                ops += self.invalid_rd_ops
-        if flush:
-            ops += self.total_flush_ops
-        return ops
-
-    def accounted_latency(self, read = False, write = False, flush = False):
-        latency = 0
-        if write:
-            latency += self.total_wr_ops * op_latency
-            if self.account_failed:
-                latency += self.failed_wr_ops * op_latency
-        if read:
-            latency += self.total_rd_ops * op_latency
-            if self.account_failed:
-                latency += self.failed_rd_ops * op_latency
-        if flush:
-            latency += self.total_flush_ops * op_latency
-        return latency
-
-    def check_values(self):
-        stats = self.blockstats('drive0')
-
-        # Check that the totals match with what we have calculated
-        self.assertEqual(self.total_rd_bytes, stats['rd_bytes'])
-        self.assertEqual(self.total_wr_bytes, stats['wr_bytes'])
-        self.assertEqual(self.total_rd_ops, stats['rd_operations'])
-        self.assertEqual(self.total_wr_ops, stats['wr_operations'])
-        self.assertEqual(self.total_flush_ops, stats['flush_operations'])
-        self.assertEqual(self.wr_highest_offset, stats['wr_highest_offset'])
-        self.assertEqual(self.failed_rd_ops, stats['failed_rd_operations'])
-        self.assertEqual(self.failed_wr_ops, stats['failed_wr_operations'])
-        self.assertEqual(self.invalid_rd_ops, stats['invalid_rd_operations'])
-        self.assertEqual(self.invalid_wr_ops, stats['invalid_wr_operations'])
-        self.assertEqual(self.account_invalid, stats['account_invalid'])
-        self.assertEqual(self.account_failed, stats['account_failed'])
-        self.assertEqual(self.total_wr_merged, stats['wr_merged'])
-
-        # Check that there's exactly one interval with the length we defined
-        self.assertEqual(1, len(stats['timed_stats']))
-        timed_stats = stats['timed_stats'][0]
-        self.assertEqual(interval_length, timed_stats['interval_length'])
-
-        total_rd_latency = self.accounted_latency(read = True)
-        if (total_rd_latency != 0):
-            self.assertEqual(total_rd_latency, stats['rd_total_time_ns'])
-            self.assertEqual(op_latency, timed_stats['min_rd_latency_ns'])
-            self.assertEqual(op_latency, timed_stats['max_rd_latency_ns'])
-            self.assertEqual(op_latency, timed_stats['avg_rd_latency_ns'])
-            self.assertLess(0, timed_stats['avg_rd_queue_depth'])
-        else:
-            self.assertEqual(0, stats['rd_total_time_ns'])
-            self.assertEqual(0, timed_stats['min_rd_latency_ns'])
-            self.assertEqual(0, timed_stats['max_rd_latency_ns'])
-            self.assertEqual(0, timed_stats['avg_rd_latency_ns'])
-            self.assertEqual(0, timed_stats['avg_rd_queue_depth'])
-
-        # min read latency <= avg read latency <= max read latency
-        self.assertLessEqual(timed_stats['min_rd_latency_ns'],
-                             timed_stats['avg_rd_latency_ns'])
-        self.assertLessEqual(timed_stats['avg_rd_latency_ns'],
-                             timed_stats['max_rd_latency_ns'])
-
-        total_wr_latency = self.accounted_latency(write = True)
-        if (total_wr_latency != 0):
-            self.assertEqual(total_wr_latency, stats['wr_total_time_ns'])
-            self.assertEqual(op_latency, timed_stats['min_wr_latency_ns'])
-            self.assertEqual(op_latency, timed_stats['max_wr_latency_ns'])
-            self.assertEqual(op_latency, timed_stats['avg_wr_latency_ns'])
-            self.assertLess(0, timed_stats['avg_wr_queue_depth'])
-        else:
-            self.assertEqual(0, stats['wr_total_time_ns'])
-            self.assertEqual(0, timed_stats['min_wr_latency_ns'])
-            self.assertEqual(0, timed_stats['max_wr_latency_ns'])
-            self.assertEqual(0, timed_stats['avg_wr_latency_ns'])
-            self.assertEqual(0, timed_stats['avg_wr_queue_depth'])
-
-        # min write latency <= avg write latency <= max write latency
-        self.assertLessEqual(timed_stats['min_wr_latency_ns'],
-                             timed_stats['avg_wr_latency_ns'])
-        self.assertLessEqual(timed_stats['avg_wr_latency_ns'],
-                             timed_stats['max_wr_latency_ns'])
-
-        total_flush_latency = self.accounted_latency(flush = True)
-        if (total_flush_latency != 0):
-            self.assertEqual(total_flush_latency, stats['flush_total_time_ns'])
-            self.assertEqual(op_latency, timed_stats['min_flush_latency_ns'])
-            self.assertEqual(op_latency, timed_stats['max_flush_latency_ns'])
-            self.assertEqual(op_latency, timed_stats['avg_flush_latency_ns'])
-        else:
-            self.assertEqual(0, stats['flush_total_time_ns'])
-            self.assertEqual(0, timed_stats['min_flush_latency_ns'])
-            self.assertEqual(0, timed_stats['max_flush_latency_ns'])
-            self.assertEqual(0, timed_stats['avg_flush_latency_ns'])
-
-        # min flush latency <= avg flush latency <= max flush latency
-        self.assertLessEqual(timed_stats['min_flush_latency_ns'],
-                             timed_stats['avg_flush_latency_ns'])
-        self.assertLessEqual(timed_stats['avg_flush_latency_ns'],
-                             timed_stats['max_flush_latency_ns'])
-
-        # idle_time_ns must be > 0 if we have performed any operation
-        if (self.accounted_ops(read = True, write = True, flush = True) != 0):
-            self.assertLess(0, stats['idle_time_ns'])
-        else:
-            self.assertFalse(stats.has_key('idle_time_ns'))
-
-        # This test does not alter these, so they must be all 0
-        self.assertEqual(0, stats['rd_merged'])
-        self.assertEqual(0, stats['failed_flush_operations'])
-        self.assertEqual(0, stats['invalid_flush_operations'])
-
-    def do_test_stats(self, rd_size = 0, rd_ops = 0, wr_size = 0, wr_ops = 0,
-                      flush_ops = 0, invalid_rd_ops = 0, invalid_wr_ops = 0,
-                      failed_rd_ops = 0, failed_wr_ops = 0, wr_merged = 0):
-        # The 'ops' list will contain all the requested I/O operations
-        ops = []
-        for i in range(rd_ops):
-            ops.append("aio_read %d %d" % (i * rd_size, rd_size))
-
-        for i in range(wr_ops):
-            ops.append("aio_write %d %d" % (i * wr_size, wr_size))
-
-        for i in range(flush_ops):
-            ops.append("aio_flush")
-
-        highest_offset = wr_ops * wr_size
-
-        # Two types of invalid operations: unaligned length and unaligned offset
-        for i in range(invalid_rd_ops / 2):
-            ops.append("aio_read 0 511")
-
-        for i in range(invalid_rd_ops / 2, invalid_rd_ops):
-            ops.append("aio_read 13 512")
-
-        for i in range(invalid_wr_ops / 2):
-            ops.append("aio_write 0 511")
-
-        for i in range(invalid_wr_ops / 2, invalid_wr_ops):
-            ops.append("aio_write 13 512")
-
-        for i in range(failed_rd_ops):
-            ops.append("aio_read %d 512" % bad_offset)
-
-        for i in range(failed_wr_ops):
-            ops.append("aio_write %d 512" % bad_offset)
-
-        if failed_wr_ops > 0:
-            highest_offset = max(highest_offset, bad_offset + 512)
-
-        for i in range(wr_merged):
-            first = i * wr_size * 2
-            second = first + wr_size
-            ops.append("multiwrite %d %d ; %d %d" %
-                       (first, wr_size, second, wr_size))
-
-        highest_offset = max(highest_offset, wr_merged * wr_size * 2)
-
-        # Now perform all operations
-        for op in ops:
-            self.vm.hmp_qemu_io("drive0", op)
-
-        # Update the expected totals
-        self.total_rd_bytes += rd_ops * rd_size
-        self.total_rd_ops += rd_ops
-        self.total_wr_bytes += wr_ops * wr_size
-        self.total_wr_ops += wr_ops
-        self.total_wr_merged += wr_merged
-        self.total_flush_ops += flush_ops
-        self.invalid_rd_ops += invalid_rd_ops
-        self.invalid_wr_ops += invalid_wr_ops
-        self.failed_rd_ops += failed_rd_ops
-        self.failed_wr_ops += failed_wr_ops
-
-        self.wr_highest_offset = max(self.wr_highest_offset, highest_offset)
-
-        # Advance the clock so idle_time_ns has a meaningful value
-        self.vm.qtest("clock_step %d" % nsec_per_sec)
-
-        # And check that the actual statistics match the expected ones
-        self.check_values()
-
-    def test_read_only(self):
-        test_values = [[512, 1],
-                       [65536, 1],
-                       [512, 12],
-                       [65536, 12]]
-        for i in test_values:
-            self.do_test_stats(rd_size = i[0], rd_ops = i[1])
-
-    def test_write_only(self):
-        test_values = [[512, 1],
-                       [65536, 1],
-                       [512, 12],
-                       [65536, 12]]
-        for i in test_values:
-            self.do_test_stats(wr_size = i[0], wr_ops = i[1])
-
-    def test_invalid(self):
-        self.do_test_stats(invalid_rd_ops = 7)
-        self.do_test_stats(invalid_wr_ops = 3)
-        self.do_test_stats(invalid_rd_ops = 4, invalid_wr_ops = 5)
-
-    def test_failed(self):
-        self.do_test_stats(failed_rd_ops = 8)
-        self.do_test_stats(failed_wr_ops = 6)
-        self.do_test_stats(failed_rd_ops = 5, failed_wr_ops = 12)
-
-    def test_flush(self):
-        self.do_test_stats(flush_ops = 8)
-
-    def test_merged(self):
-        for i in range(5):
-            self.do_test_stats(wr_merged = i * 3)
-
-    def test_all(self):
-        # rd_size, rd_ops, wr_size, wr_ops, flush_ops
-        # invalid_rd_ops, invalid_wr_ops,
-        # failed_rd_ops, failed_wr_ops
-        # wr_merged
-        test_values = [[512, 1, 512, 1, 1, 4, 7, 5, 2, 1],
-                       [65536, 1, 2048, 12, 7, 7, 5, 2, 5, 5],
-                       [32768, 9, 8192, 1, 4, 3, 2, 4, 6, 4],
-                       [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 4]]
-        for i in test_values:
-            self.do_test_stats(*i)
-
-    def test_no_op(self):
-        # All values must be sane before doing any I/O
-        self.check_values()
-
-
-class BlockDeviceStatsTestAccountInvalid(BlockDeviceStatsTestCase):
-    account_invalid = True
-    account_failed = False
-
-class BlockDeviceStatsTestAccountFailed(BlockDeviceStatsTestCase):
-    account_invalid = False
-    account_failed = True
-
-class BlockDeviceStatsTestAccountBoth(BlockDeviceStatsTestCase):
-    account_invalid = True
-    account_failed = True
-
-class BlockDeviceStatsTestCoroutine(BlockDeviceStatsTestCase):
-    test_img = "null-co://"
-
-if __name__ == '__main__':
-    iotests.main(supported_fmts=["raw"])