author    Yang Zhang <yang.z.zhang@intel.com>    2015-08-28 09:58:54 +0800
committer Yang Zhang <yang.z.zhang@intel.com>    2015-09-01 12:44:00 +0800
commit    e44e3482bdb4d0ebde2d8b41830ac2cdb07948fb (patch)
tree      66b09f592c55df2878107a468a91d21506104d3f /qemu/tests/image-fuzzer
parent    9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (diff)
Add qemu 2.4.0
Change-Id: Ic99cbad4b61f8b127b7dc74d04576c0bcbaaf4f5
Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
Diffstat (limited to 'qemu/tests/image-fuzzer')
-rw-r--r--  qemu/tests/image-fuzzer/qcow2/__init__.py      1
-rw-r--r--  qemu/tests/image-fuzzer/qcow2/fuzz.py        367
-rw-r--r--  qemu/tests/image-fuzzer/qcow2/layout.py      612
-rwxr-xr-x  qemu/tests/image-fuzzer/runner.py            437
4 files changed, 1417 insertions, 0 deletions
diff --git a/qemu/tests/image-fuzzer/qcow2/__init__.py b/qemu/tests/image-fuzzer/qcow2/__init__.py
new file mode 100644
index 000000000..e2ebe1931
--- /dev/null
+++ b/qemu/tests/image-fuzzer/qcow2/__init__.py
@@ -0,0 +1 @@
+from layout import create_image
diff --git a/qemu/tests/image-fuzzer/qcow2/fuzz.py b/qemu/tests/image-fuzzer/qcow2/fuzz.py
new file mode 100644
index 000000000..20eba6bc1
--- /dev/null
+++ b/qemu/tests/image-fuzzer/qcow2/fuzz.py
@@ -0,0 +1,367 @@
+# Fuzzing functions for qcow2 fields
+#
+# Copyright (C) 2014 Maria Kustova <maria.k@catit.be>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import random
+
+UINT8 = 0xff
+UINT16 = 0xffff
+UINT32 = 0xffffffff
+UINT64 = 0xffffffffffffffff
+# Most significant bit orders
+UINT32_M = 31
+UINT64_M = 63
+# Fuzz vectors
+UINT8_V = [0, 0x10, UINT8/4, UINT8/2 - 1, UINT8/2, UINT8/2 + 1, UINT8 - 1,
+ UINT8]
+UINT16_V = [0, 0x100, 0x1000, UINT16/4, UINT16/2 - 1, UINT16/2, UINT16/2 + 1,
+ UINT16 - 1, UINT16]
+UINT32_V = [0, 0x100, 0x1000, 0x10000, 0x100000, UINT32/4, UINT32/2 - 1,
+ UINT32/2, UINT32/2 + 1, UINT32 - 1, UINT32]
+UINT64_V = UINT32_V + [0x1000000, 0x10000000, 0x100000000, UINT64/4,
+ UINT64/2 - 1, UINT64/2, UINT64/2 + 1, UINT64 - 1,
+ UINT64]
+STRING_V = ['%s%p%x%d', '.1024d', '%.2049d', '%p%p%p%p', '%x%x%x%x',
+ '%d%d%d%d', '%s%s%s%s', '%99999999999s', '%08x', '%%20d', '%%20n',
+ '%%20x', '%%20s', '%s%s%s%s%s%s%s%s%s%s', '%p%p%p%p%p%p%p%p%p%p',
+ '%#0123456x%08x%x%s%p%d%n%o%u%c%h%l%q%j%z%Z%t%i%e%g%f%a%C%S%08x%%',
+ '%s x 129', '%x x 257']
+
+
+def random_from_intervals(intervals):
+ """Select a random integer number from the list of specified intervals.
+
+ Each interval is a tuple of lower and upper limits of the interval. The
+ limits are included. Intervals in a list should not overlap.
+ """
+ total = reduce(lambda x, y: x + y[1] - y[0] + 1, intervals, 0)
+ r = random.randint(0, total - 1) + intervals[0][0]
+ for x in zip(intervals, intervals[1:]):
+ r = r + (r > x[0][1]) * (x[1][0] - x[0][1] - 1)
+ return r
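+
+# Editorial usage sketch (not part of the upstream patch): both interval
+# limits are inclusive, so with sorted, non-overlapping intervals the call
+# below may return any of 0, 1, 2, 3, 10, 11 or 12 with equal probability.
+#
+#     random_from_intervals([(0, 3), (10, 12)])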
+
+
+def random_bits(bit_ranges):
+ """Generate random binary mask with ones in the specified bit ranges.
+
+ Each bit_ranges is a list of tuples of lower and upper limits of bit
+ positions will be fuzzed. The limits are included. Random amount of bits
+ in range limits will be set to ones. The mask is returned in decimal
+ integer format.
+ """
+ bit_numbers = []
+    # Select a random number of random positions within bit_ranges
+ for rng in bit_ranges:
+ bit_numbers += random.sample(range(rng[0], rng[1] + 1),
+ random.randint(0, rng[1] - rng[0] + 1))
+ val = 0
+ # Set bits on selected positions to ones
+ for bit in bit_numbers:
+ val |= 1 << bit
+ return val
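+
+# Editorial usage sketch (not part of the upstream patch): the call below
+# returns a mask with a random subset of bits 0-1 and 62-63 set to one,
+# e.g. 0x4000000000000001 (bit 62 and bit 0).
+#
+#     random_bits([(0, 1), (62, 63)])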
+
+
+def truncate_string(strings, length):
+ """Return strings truncated to specified length."""
+ if type(strings) == list:
+ return [s[:length] for s in strings]
+ else:
+ return strings[:length]
+
+
+def validator(current, pick, choices):
+ """Return a value not equal to the current selected by the pick
+ function from choices.
+ """
+ while True:
+ val = pick(choices)
+ if not val == current:
+ return val
+
+
+def int_validator(current, intervals):
+ """Return a random value from intervals not equal to the current.
+
+ This function is useful for selection from valid values except current one.
+ """
+ return validator(current, random_from_intervals, intervals)
+
+
+def bit_validator(current, bit_ranges):
+ """Return a random bit mask not equal to the current.
+
+ This function is useful for selection from valid values except current one.
+ """
+ return validator(current, random_bits, bit_ranges)
+
+
+def string_validator(current, strings):
+ """Return a random string value from the list not equal to the current.
+
+ This function is useful for selection from valid values except current one.
+ """
+ return validator(current, random.choice, strings)
+
+
+def selector(current, constraints, validate=int_validator):
+ """Select one value from all defined by constraints.
+
+ Each constraint produces one random value satisfying to it. The function
+ randomly selects one value satisfying at least one constraint (depending on
+ constraints overlaps).
+ """
+ def iter_validate(c):
+ """Apply validate() only to constraints represented as lists.
+
+        This auxiliary function replaces short-circuit conditions, which are
+        not supported in Python 2.4.
+        """
+ if type(c) == list:
+ return validate(current, c)
+ else:
+ return c
+
+ fuzz_values = [iter_validate(c) for c in constraints]
+    # Remove 'current' in case it is implicitly specified in the constraints.
+    # This duplicates the validator functionality to avoid lowering the
+    # probability of getting one of the allowed values.
+    # TODO: remove the validators once intelligent selection of the fields to
+    # be fuzzed is implemented
+ try:
+ fuzz_values.remove(current)
+ except ValueError:
+ pass
+ return random.choice(fuzz_values)
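+
+# Editorial usage sketch (not part of the upstream patch): constraints may mix
+# scalar values (taken as-is) and interval lists (passed to the validator), so
+# the call below returns either 42 or a random value from 0-100 or 200-300
+# that differs from the current value 5.
+#
+#     selector(5, [42, [(0, 100)], [(200, 300)]])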
+
+
+def magic(current):
+ """Fuzz magic header field.
+
+ The function just returns the current magic value and provides uniformity
+ of calls for all fuzzing functions.
+ """
+ return current
+
+
+def version(current):
+ """Fuzz version header field."""
+ constraints = UINT32_V + [
+ [(2, 3)], # correct values
+ [(0, 1), (4, UINT32)]
+ ]
+ return selector(current, constraints)
+
+
+def backing_file_offset(current):
+ """Fuzz backing file offset header field."""
+ constraints = UINT64_V
+ return selector(current, constraints)
+
+
+def backing_file_size(current):
+ """Fuzz backing file size header field."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def cluster_bits(current):
+ """Fuzz cluster bits header field."""
+ constraints = UINT32_V + [
+ [(9, 20)], # correct values
+ [(0, 9), (20, UINT32)]
+ ]
+ return selector(current, constraints)
+
+
+def size(current):
+ """Fuzz image size header field."""
+ constraints = UINT64_V
+ return selector(current, constraints)
+
+
+def crypt_method(current):
+ """Fuzz crypt method header field."""
+ constraints = UINT32_V + [
+ 1,
+ [(2, UINT32)]
+ ]
+ return selector(current, constraints)
+
+
+def l1_size(current):
+ """Fuzz L1 table size header field."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def l1_table_offset(current):
+ """Fuzz L1 table offset header field."""
+ constraints = UINT64_V
+ return selector(current, constraints)
+
+
+def refcount_table_offset(current):
+ """Fuzz refcount table offset header field."""
+ constraints = UINT64_V
+ return selector(current, constraints)
+
+
+def refcount_table_clusters(current):
+ """Fuzz refcount table clusters header field."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def nb_snapshots(current):
+ """Fuzz number of snapshots header field."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def snapshots_offset(current):
+ """Fuzz snapshots offset header field."""
+ constraints = UINT64_V
+ return selector(current, constraints)
+
+
+def incompatible_features(current):
+ """Fuzz incompatible features header field."""
+ constraints = [
+ [(0, 1)], # allowable values
+ [(0, UINT64_M)]
+ ]
+ return selector(current, constraints, bit_validator)
+
+
+def compatible_features(current):
+ """Fuzz compatible features header field."""
+ constraints = [
+ [(0, UINT64_M)]
+ ]
+ return selector(current, constraints, bit_validator)
+
+
+def autoclear_features(current):
+ """Fuzz autoclear features header field."""
+ constraints = [
+ [(0, UINT64_M)]
+ ]
+ return selector(current, constraints, bit_validator)
+
+
+def refcount_order(current):
+ """Fuzz number of refcount order header field."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def header_length(current):
+ """Fuzz number of refcount order header field."""
+ constraints = UINT32_V + [
+ 72,
+ 104,
+ [(0, UINT32)]
+ ]
+ return selector(current, constraints)
+
+
+def bf_name(current):
+ """Fuzz the backing file name."""
+ constraints = [
+ truncate_string(STRING_V, len(current))
+ ]
+ return selector(current, constraints, string_validator)
+
+
+def ext_magic(current):
+ """Fuzz magic field of a header extension."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def ext_length(current):
+ """Fuzz length field of a header extension."""
+ constraints = UINT32_V
+ return selector(current, constraints)
+
+
+def bf_format(current):
+ """Fuzz backing file format in the corresponding header extension."""
+ constraints = [
+ truncate_string(STRING_V, len(current)),
+ truncate_string(STRING_V, (len(current) + 7) & ~7) # Fuzz padding
+ ]
+ return selector(current, constraints, string_validator)
+
+
+def feature_type(current):
+ """Fuzz feature type field of a feature name table header extension."""
+ constraints = UINT8_V
+ return selector(current, constraints)
+
+
+def feature_bit_number(current):
+ """Fuzz bit number field of a feature name table header extension."""
+ constraints = UINT8_V
+ return selector(current, constraints)
+
+
+def feature_name(current):
+ """Fuzz feature name field of a feature name table header extension."""
+ constraints = [
+ truncate_string(STRING_V, len(current)),
+ truncate_string(STRING_V, 46) # Fuzz padding (field length = 46)
+ ]
+ return selector(current, constraints, string_validator)
+
+
+def l1_entry(current):
+ """Fuzz an entry of the L1 table."""
+ constraints = UINT64_V
+ # Reserved bits are ignored
+    # Add the possibility that only the flags are fuzzed
+ offset = 0x7fffffffffffffff & \
+ random.choice([selector(current, constraints), current])
+ is_cow = random.randint(0, 1)
+ return offset + (is_cow << UINT64_M)
+
+
+def l2_entry(current):
+ """Fuzz an entry of an L2 table."""
+ constraints = UINT64_V
+ # Reserved bits are ignored
+    # Add the possibility that only the flags are fuzzed
+ offset = 0x3ffffffffffffffe & \
+ random.choice([selector(current, constraints), current])
+ is_compressed = random.randint(0, 1)
+ is_cow = random.randint(0, 1)
+ is_zero = random.randint(0, 1)
+ value = offset + (is_cow << UINT64_M) + \
+ (is_compressed << UINT64_M - 1) + is_zero
+ return value
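+
+# Editorial sketch (not part of the upstream patch): building an L2 entry by
+# hand for a cluster descriptor of 0x50000 with the "compressed" flag and the
+# "refcount is one" flag set and the "all zeroes" flag clear reproduces the
+# arithmetic above.
+#
+#     (1 << 63) + (1 << 62) + (0x50000 & 0x3ffffffffffffffe) + 0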
+
+
+def refcount_table_entry(current):
+ """Fuzz an entry of the refcount table."""
+ constraints = UINT64_V
+ return selector(current, constraints)
+
+
+def refcount_block_entry(current):
+ """Fuzz an entry of a refcount block."""
+ constraints = UINT16_V
+ return selector(current, constraints)
diff --git a/qemu/tests/image-fuzzer/qcow2/layout.py b/qemu/tests/image-fuzzer/qcow2/layout.py
new file mode 100644
index 000000000..63e801f4e
--- /dev/null
+++ b/qemu/tests/image-fuzzer/qcow2/layout.py
@@ -0,0 +1,612 @@
+# Generator of fuzzed qcow2 images
+#
+# Copyright (C) 2014 Maria Kustova <maria.k@catit.be>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import random
+import struct
+import fuzz
+from math import ceil
+from os import urandom
+from itertools import chain
+
+MAX_IMAGE_SIZE = 10 * (1 << 20)
+# Standard sizes
+UINT32_S = 4
+UINT64_S = 8
+
+
+class Field(object):
+
+ """Atomic image element (field).
+
+    The class represents an image field as a quadruple of a data format
+    (as required to pack the value into binary form), an offset from
+    the beginning of the image, a value and a name.
+
+ The field can be iterated as a list [format, offset, value, name].
+ """
+
+ __slots__ = ('fmt', 'offset', 'value', 'name')
+
+ def __init__(self, fmt, offset, val, name):
+ self.fmt = fmt
+ self.offset = offset
+ self.value = val
+ self.name = name
+
+ def __iter__(self):
+ return iter([self.fmt, self.offset, self.value, self.name])
+
+ def __repr__(self):
+ return "Field(fmt='%s', offset=%d, value=%s, name=%s)" % \
+ (self.fmt, self.offset, str(self.value), self.name)
+
+
+class FieldsList(object):
+
+ """List of fields.
+
+ The class allows access to a field in the list by its name.
+ """
+
+ def __init__(self, meta_data=None):
+ if meta_data is None:
+ self.data = []
+ else:
+ self.data = [Field(*f)
+ for f in meta_data]
+
+ def __getitem__(self, name):
+ return [x for x in self.data if x.name == name]
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+
+class Image(object):
+
+ """ Qcow2 image object.
+
+ This class allows to create qcow2 images with random valid structures and
+ values, fuzz them via external qcow2.fuzz module and write the result to
+ a file.
+ """
+
+ def __init__(self, backing_file_name=None):
+ """Create a random valid qcow2 image with the correct header and stored
+ backing file name.
+ """
+ cluster_bits, self.image_size = self._size_params()
+ self.cluster_size = 1 << cluster_bits
+ self.header = FieldsList()
+ self.backing_file_name = FieldsList()
+ self.backing_file_format = FieldsList()
+ self.feature_name_table = FieldsList()
+ self.end_of_extension_area = FieldsList()
+ self.l2_tables = FieldsList()
+ self.l1_table = FieldsList()
+ self.refcount_table = FieldsList()
+ self.refcount_blocks = FieldsList()
+ self.ext_offset = 0
+ self.create_header(cluster_bits, backing_file_name)
+ self.set_backing_file_name(backing_file_name)
+ self.data_clusters = self._alloc_data(self.image_size,
+ self.cluster_size)
+        # Fraction of the fields to be fuzzed
+ self.bias = random.uniform(0.2, 0.5)
+
+ def __iter__(self):
+ return chain(self.header, self.backing_file_format,
+ self.feature_name_table, self.end_of_extension_area,
+ self.backing_file_name, self.l1_table, self.l2_tables,
+ self.refcount_table, self.refcount_blocks)
+
+ def create_header(self, cluster_bits, backing_file_name=None):
+ """Generate a random valid header."""
+ meta_header = [
+ ['>4s', 0, "QFI\xfb", 'magic'],
+ ['>I', 4, random.randint(2, 3), 'version'],
+ ['>Q', 8, 0, 'backing_file_offset'],
+ ['>I', 16, 0, 'backing_file_size'],
+ ['>I', 20, cluster_bits, 'cluster_bits'],
+ ['>Q', 24, self.image_size, 'size'],
+ ['>I', 32, 0, 'crypt_method'],
+ ['>I', 36, 0, 'l1_size'],
+ ['>Q', 40, 0, 'l1_table_offset'],
+ ['>Q', 48, 0, 'refcount_table_offset'],
+ ['>I', 56, 0, 'refcount_table_clusters'],
+ ['>I', 60, 0, 'nb_snapshots'],
+ ['>Q', 64, 0, 'snapshots_offset'],
+ ['>Q', 72, 0, 'incompatible_features'],
+ ['>Q', 80, 0, 'compatible_features'],
+ ['>Q', 88, 0, 'autoclear_features'],
+ # Only refcount_order = 4 is supported by current (07.2014)
+ # implementation of QEMU
+ ['>I', 96, 4, 'refcount_order'],
+ ['>I', 100, 0, 'header_length']
+ ]
+ self.header = FieldsList(meta_header)
+
+ if self.header['version'][0].value == 2:
+ self.header['header_length'][0].value = 72
+ else:
+ self.header['incompatible_features'][0].value = \
+ random.getrandbits(2)
+ self.header['compatible_features'][0].value = random.getrandbits(1)
+ self.header['header_length'][0].value = 104
+        # Extensions start right after the last header field (offset + size)
+ self.ext_offset = struct.calcsize(
+ self.header['header_length'][0].fmt) + \
+ self.header['header_length'][0].offset
+ end_of_extension_area_len = 2 * UINT32_S
+ free_space = self.cluster_size - self.ext_offset - \
+ end_of_extension_area_len
+        # If the backing file name is specified and there is enough space for
+        # it in the first cluster, then it is placed at the very end of the
+        # first cluster.
+ if (backing_file_name is not None) and \
+ (free_space >= len(backing_file_name)):
+ self.header['backing_file_size'][0].value = len(backing_file_name)
+ self.header['backing_file_offset'][0].value = \
+ self.cluster_size - len(backing_file_name)
+
+ def set_backing_file_name(self, backing_file_name=None):
+ """Add the name of the backing file at the offset specified
+ in the header.
+ """
+ if (backing_file_name is not None) and \
+ (not self.header['backing_file_offset'][0].value == 0):
+ data_len = len(backing_file_name)
+ data_fmt = '>' + str(data_len) + 's'
+ self.backing_file_name = FieldsList([
+ [data_fmt, self.header['backing_file_offset'][0].value,
+ backing_file_name, 'bf_name']
+ ])
+
+ def set_backing_file_format(self, backing_file_fmt=None):
+ """Generate the header extension for the backing file format."""
+ if backing_file_fmt is not None:
+ # Calculation of the free space available in the first cluster
+ end_of_extension_area_len = 2 * UINT32_S
+ high_border = (self.header['backing_file_offset'][0].value or
+ (self.cluster_size - 1)) - \
+ end_of_extension_area_len
+ free_space = high_border - self.ext_offset
+ ext_size = 2 * UINT32_S + ((len(backing_file_fmt) + 7) & ~7)
+
+ if free_space >= ext_size:
+ ext_data_len = len(backing_file_fmt)
+ ext_data_fmt = '>' + str(ext_data_len) + 's'
+ ext_padding_len = 7 - (ext_data_len - 1) % 8
+ self.backing_file_format = FieldsList([
+ ['>I', self.ext_offset, 0xE2792ACA, 'ext_magic'],
+ ['>I', self.ext_offset + UINT32_S, ext_data_len,
+ 'ext_length'],
+ [ext_data_fmt, self.ext_offset + UINT32_S * 2,
+ backing_file_fmt, 'bf_format']
+ ])
+ self.ext_offset = \
+ struct.calcsize(
+ self.backing_file_format['bf_format'][0].fmt) + \
+ ext_padding_len + \
+ self.backing_file_format['bf_format'][0].offset
+
+ def create_feature_name_table(self):
+ """Generate a random header extension for names of features used in
+ the image.
+ """
+ def gen_feat_ids():
+ """Return random feature type and feature bit."""
+ return (random.randint(0, 2), random.randint(0, 63))
+
+ end_of_extension_area_len = 2 * UINT32_S
+ high_border = (self.header['backing_file_offset'][0].value or
+ (self.cluster_size - 1)) - \
+ end_of_extension_area_len
+ free_space = high_border - self.ext_offset
+ # Sum of sizes of 'magic' and 'length' header extension fields
+ ext_header_len = 2 * UINT32_S
+ fnt_entry_size = 6 * UINT64_S
+ num_fnt_entries = min(10, (free_space - ext_header_len) /
+ fnt_entry_size)
+ if not num_fnt_entries == 0:
+ feature_tables = []
+ feature_ids = []
+ inner_offset = self.ext_offset + ext_header_len
+ feat_name = 'some cool feature'
+ while len(feature_tables) < num_fnt_entries * 3:
+ feat_type, feat_bit = gen_feat_ids()
+ # Remove duplicates
+ while (feat_type, feat_bit) in feature_ids:
+ feat_type, feat_bit = gen_feat_ids()
+ feature_ids.append((feat_type, feat_bit))
+ feat_fmt = '>' + str(len(feat_name)) + 's'
+ feature_tables += [['B', inner_offset,
+ feat_type, 'feature_type'],
+ ['B', inner_offset + 1, feat_bit,
+ 'feature_bit_number'],
+ [feat_fmt, inner_offset + 2,
+ feat_name, 'feature_name']
+ ]
+ inner_offset += fnt_entry_size
+ # No padding for the extension is necessary, because
+            # the extension length is a multiple of 8
+ self.feature_name_table = FieldsList([
+ ['>I', self.ext_offset, 0x6803f857, 'ext_magic'],
+ # One feature table contains 3 fields and takes 48 bytes
+ ['>I', self.ext_offset + UINT32_S,
+ len(feature_tables) / 3 * 48, 'ext_length']
+ ] + feature_tables)
+ self.ext_offset = inner_offset
+
+ def set_end_of_extension_area(self):
+ """Generate a mandatory header extension marking end of header
+ extensions.
+ """
+ self.end_of_extension_area = FieldsList([
+ ['>I', self.ext_offset, 0, 'ext_magic'],
+ ['>I', self.ext_offset + UINT32_S, 0, 'ext_length']
+ ])
+
+ def create_l_structures(self):
+ """Generate random valid L1 and L2 tables."""
+ def create_l2_entry(host, guest, l2_cluster):
+ """Generate one L2 entry."""
+ offset = l2_cluster * self.cluster_size
+ l2_size = self.cluster_size / UINT64_S
+ entry_offset = offset + UINT64_S * (guest % l2_size)
+ cluster_descriptor = host * self.cluster_size
+ if not self.header['version'][0].value == 2:
+ cluster_descriptor += random.randint(0, 1)
+ # While snapshots are not supported, bit #63 = 1
+ # Compressed clusters are not supported => bit #62 = 0
+ entry_val = (1 << 63) + cluster_descriptor
+ return ['>Q', entry_offset, entry_val, 'l2_entry']
+
+ def create_l1_entry(l2_cluster, l1_offset, guest):
+ """Generate one L1 entry."""
+ l2_size = self.cluster_size / UINT64_S
+ entry_offset = l1_offset + UINT64_S * (guest / l2_size)
+ # While snapshots are not supported bit #63 = 1
+ entry_val = (1 << 63) + l2_cluster * self.cluster_size
+ return ['>Q', entry_offset, entry_val, 'l1_entry']
+
+ if len(self.data_clusters) == 0:
+ # All metadata for an empty guest image needs 4 clusters:
+ # header, rfc table, rfc block, L1 table.
+ # Header takes cluster #0, other clusters ##1-3 can be used
+ l1_offset = random.randint(1, 3) * self.cluster_size
+ l1 = [['>Q', l1_offset, 0, 'l1_entry']]
+ l2 = []
+ else:
+ meta_data = self._get_metadata()
+ guest_clusters = random.sample(range(self.image_size /
+ self.cluster_size),
+ len(self.data_clusters))
+ # Number of entries in a L1/L2 table
+ l_size = self.cluster_size / UINT64_S
+ # Number of clusters necessary for L1 table
+ l1_size = int(ceil((max(guest_clusters) + 1) / float(l_size**2)))
+ l1_start = self._get_adjacent_clusters(self.data_clusters |
+ meta_data, l1_size)
+ meta_data |= set(range(l1_start, l1_start + l1_size))
+ l1_offset = l1_start * self.cluster_size
+ # Indices of L2 tables
+ l2_ids = []
+ # Host clusters allocated for L2 tables
+ l2_clusters = []
+ # L1 entries
+ l1 = []
+ # L2 entries
+ l2 = []
+ for host, guest in zip(self.data_clusters, guest_clusters):
+ l2_id = guest / l_size
+ if l2_id not in l2_ids:
+ l2_ids.append(l2_id)
+ l2_clusters.append(self._get_adjacent_clusters(
+ self.data_clusters | meta_data | set(l2_clusters),
+ 1))
+ l1.append(create_l1_entry(l2_clusters[-1], l1_offset,
+ guest))
+ l2.append(create_l2_entry(host, guest,
+ l2_clusters[l2_ids.index(l2_id)]))
+ self.l2_tables = FieldsList(l2)
+ self.l1_table = FieldsList(l1)
+ self.header['l1_size'][0].value = int(ceil(UINT64_S * self.image_size /
+ float(self.cluster_size**2)))
+ self.header['l1_table_offset'][0].value = l1_offset
+
+ def create_refcount_structures(self):
+ """Generate random refcount blocks and refcount table."""
+ def allocate_rfc_blocks(data, size):
+ """Return indices of clusters allocated for refcount blocks."""
+ cluster_ids = set()
+ diff = block_ids = set([x / size for x in data])
+ while len(diff) != 0:
+ # Allocate all yet not allocated clusters
+ new = self._get_available_clusters(data | cluster_ids,
+ len(diff))
+ # Indices of new refcount blocks necessary to cover clusters
+ # in 'new'
+ diff = set([x / size for x in new]) - block_ids
+ cluster_ids |= new
+ block_ids |= diff
+ return cluster_ids, block_ids
+
+ def allocate_rfc_table(data, init_blocks, block_size):
+ """Return indices of clusters allocated for the refcount table
+ and updated indices of clusters allocated for blocks and indices
+ of blocks.
+ """
+ blocks = set(init_blocks)
+ clusters = set()
+ # Number of entries in one cluster of the refcount table
+ size = self.cluster_size / UINT64_S
+ # Number of clusters necessary for the refcount table based on
+ # the current number of refcount blocks
+ table_size = int(ceil((max(blocks) + 1) / float(size)))
+ # Index of the first cluster of the refcount table
+ table_start = self._get_adjacent_clusters(data, table_size + 1)
+ # Clusters allocated for the current length of the refcount table
+ table_clusters = set(range(table_start, table_start + table_size))
+ # Clusters allocated for the refcount table including
+ # last optional one for potential l1 growth
+ table_clusters_allocated = set(range(table_start, table_start +
+ table_size + 1))
+ # New refcount blocks necessary for clusters occupied by the
+ # refcount table
+ diff = set([c / block_size for c in table_clusters]) - blocks
+ blocks |= diff
+ while len(diff) != 0:
+ # Allocate clusters for new refcount blocks
+ new = self._get_available_clusters((data | clusters) |
+ table_clusters_allocated,
+ len(diff))
+ # Indices of new refcount blocks necessary to cover
+ # clusters in 'new'
+ diff = set([x / block_size for x in new]) - blocks
+ clusters |= new
+ blocks |= diff
+ # Check if the refcount table needs one more cluster
+ if int(ceil((max(blocks) + 1) / float(size))) > table_size:
+ new_block_id = (table_start + table_size) / block_size
+ # Check if the additional table cluster needs
+ # one more refcount block
+ if new_block_id not in blocks:
+ diff.add(new_block_id)
+ table_clusters.add(table_start + table_size)
+ table_size += 1
+ return table_clusters, blocks, clusters
+
+ def create_table_entry(table_offset, block_cluster, block_size,
+ cluster):
+ """Generate a refcount table entry."""
+ offset = table_offset + UINT64_S * (cluster / block_size)
+ return ['>Q', offset, block_cluster * self.cluster_size,
+ 'refcount_table_entry']
+
+ def create_block_entry(block_cluster, block_size, cluster):
+ """Generate a list of entries for the current block."""
+ entry_size = self.cluster_size / block_size
+ offset = block_cluster * self.cluster_size
+ entry_offset = offset + entry_size * (cluster % block_size)
+ # While snapshots are not supported all refcounts are set to 1
+ return ['>H', entry_offset, 1, 'refcount_block_entry']
+ # Size of a block entry in bits
+ refcount_bits = 1 << self.header['refcount_order'][0].value
+ # Number of refcount entries per refcount block
+ # Convert self.cluster_size from bytes to bits to have the same
+ # base for the numerator and denominator
+ block_size = self.cluster_size * 8 / refcount_bits
+ meta_data = self._get_metadata()
+ if len(self.data_clusters) == 0:
+ # All metadata for an empty guest image needs 4 clusters:
+ # header, rfc table, rfc block, L1 table.
+ # Header takes cluster #0, other clusters ##1-3 can be used
+ block_clusters = set([random.choice(list(set(range(1, 4)) -
+ meta_data))])
+ block_ids = set([0])
+ table_clusters = set([random.choice(list(set(range(1, 4)) -
+ meta_data -
+ block_clusters))])
+ else:
+ block_clusters, block_ids = \
+ allocate_rfc_blocks(self.data_clusters |
+ meta_data, block_size)
+ table_clusters, block_ids, new_clusters = \
+ allocate_rfc_table(self.data_clusters |
+ meta_data |
+ block_clusters,
+ block_ids,
+ block_size)
+ block_clusters |= new_clusters
+
+ meta_data |= block_clusters | table_clusters
+ table_offset = min(table_clusters) * self.cluster_size
+ block_id = None
+ # Clusters allocated for refcount blocks
+ block_clusters = list(block_clusters)
+ # Indices of refcount blocks
+ block_ids = list(block_ids)
+ # Refcount table entries
+ rfc_table = []
+ # Refcount entries
+ rfc_blocks = []
+
+ for cluster in sorted(self.data_clusters | meta_data):
+ if cluster / block_size != block_id:
+ block_id = cluster / block_size
+ block_cluster = block_clusters[block_ids.index(block_id)]
+ rfc_table.append(create_table_entry(table_offset,
+ block_cluster,
+ block_size, cluster))
+ rfc_blocks.append(create_block_entry(block_cluster, block_size,
+ cluster))
+ self.refcount_table = FieldsList(rfc_table)
+ self.refcount_blocks = FieldsList(rfc_blocks)
+
+ self.header['refcount_table_offset'][0].value = table_offset
+ self.header['refcount_table_clusters'][0].value = len(table_clusters)
+
+ def fuzz(self, fields_to_fuzz=None):
+ """Fuzz an image by corrupting values of a random subset of its fields.
+
+ Without parameters the method fuzzes an entire image.
+
+ If 'fields_to_fuzz' is specified then only fields in this list will be
+        fuzzed. 'fields_to_fuzz' can contain both individual fields and more
+        general image elements such as the header or the tables.
+
+        In the first case the field will always be fuzzed.
+        In the second case a random subset of the element's fields will be
+        selected and fuzzed.
+ """
+ def coin():
+ """Return boolean value proportional to a portion of fields to be
+ fuzzed.
+ """
+ return random.random() < self.bias
+
+ if fields_to_fuzz is None:
+ for field in self:
+ if coin():
+ field.value = getattr(fuzz, field.name)(field.value)
+ else:
+ for item in fields_to_fuzz:
+ if len(item) == 1:
+ for field in getattr(self, item[0]):
+ if coin():
+ field.value = getattr(fuzz,
+ field.name)(field.value)
+ else:
+ # If fields with the requested name were not generated
+ # getattr(self, item[0])[item[1]] returns an empty list
+ for field in getattr(self, item[0])[item[1]]:
+ field.value = getattr(fuzz, field.name)(field.value)
+
+ def write(self, filename):
+ """Write an entire image to the file."""
+ image_file = open(filename, 'w')
+ for field in self:
+ image_file.seek(field.offset)
+ image_file.write(struct.pack(field.fmt, field.value))
+
+ for cluster in sorted(self.data_clusters):
+ image_file.seek(cluster * self.cluster_size)
+ image_file.write(urandom(self.cluster_size))
+
+ # Align the real image size to the cluster size
+ image_file.seek(0, 2)
+ size = image_file.tell()
+ rounded = (size + self.cluster_size - 1) & ~(self.cluster_size - 1)
+ if rounded > size:
+ image_file.seek(rounded - 1)
+ image_file.write("\0")
+ image_file.close()
+
+ @staticmethod
+ def _size_params():
+ """Generate a random image size aligned to a random correct
+ cluster size.
+ """
+ cluster_bits = random.randrange(9, 21)
+ cluster_size = 1 << cluster_bits
+ img_size = random.randrange(0, MAX_IMAGE_SIZE + 1, cluster_size)
+ return (cluster_bits, img_size)
+
+ @staticmethod
+ def _get_available_clusters(used, number):
+ """Return a set of indices of not allocated clusters.
+
+ 'used' contains indices of currently allocated clusters.
+ All clusters that cannot be allocated between 'used' clusters will have
+ indices appended to the end of 'used'.
+ """
+ append_id = max(used) + 1
+ free = set(range(1, append_id)) - used
+ if len(free) >= number:
+ return set(random.sample(free, number))
+ else:
+ return free | set(range(append_id, append_id + number - len(free)))
+
+ @staticmethod
+ def _get_adjacent_clusters(used, size):
+ """Return an index of the first cluster in the sequence of free ones.
+
+ 'used' contains indices of currently allocated clusters. 'size' is the
+ length of the sequence of free clusters.
+ If the sequence of 'size' is not available between 'used' clusters, its
+ first index will be append to the end of 'used'.
+ """
+ def get_cluster_id(lst, length):
+ """Return the first index of the sequence of the specified length
+ or None if the sequence cannot be inserted in the list.
+ """
+ if len(lst) != 0:
+ pairs = []
+ pair = (lst[0], 1)
+ for i in range(1, len(lst)):
+ if lst[i] == lst[i-1] + 1:
+ pair = (lst[i], pair[1] + 1)
+ else:
+ pairs.append(pair)
+ pair = (lst[i], 1)
+ pairs.append(pair)
+ random.shuffle(pairs)
+ for x, s in pairs:
+ if s >= length:
+ return x - length + 1
+ return None
+
+ append_id = max(used) + 1
+ free = list(set(range(1, append_id)) - used)
+ idx = get_cluster_id(free, size)
+ if idx is None:
+ return append_id
+ else:
+ return idx
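+
+    # Editorial usage sketch (not part of the upstream patch): with clusters
+    # 1, 2 and 5 already allocated, a run of two free clusters fits only at
+    # 3-4, so 3 is returned; a run of three does not fit between the used
+    # clusters, so the append index 6 (max(used) + 1) is returned instead.
+    #
+    #     Image._get_adjacent_clusters(set([1, 2, 5]), 2)    # 3
+    #     Image._get_adjacent_clusters(set([1, 2, 5]), 3)    # 6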
+
+ @staticmethod
+ def _alloc_data(img_size, cluster_size):
+ """Return a set of random indices of clusters allocated for guest data.
+ """
+ num_of_cls = img_size/cluster_size
+ return set(random.sample(range(1, num_of_cls + 1),
+ random.randint(0, num_of_cls)))
+
+ def _get_metadata(self):
+ """Return indices of clusters allocated for image metadata."""
+ ids = set()
+ for x in self:
+ ids.add(x.offset/self.cluster_size)
+ return ids
+
+
+def create_image(test_img_path, backing_file_name=None, backing_file_fmt=None,
+ fields_to_fuzz=None):
+ """Create a fuzzed image and write it to the specified file."""
+ image = Image(backing_file_name)
+ image.set_backing_file_format(backing_file_fmt)
+ image.create_feature_name_table()
+ image.set_end_of_extension_area()
+ image.create_l_structures()
+ image.create_refcount_structures()
+ image.fuzz(fields_to_fuzz)
+ image.write(test_img_path)
+ return image.image_size
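+
+# Editorial usage sketch (not part of the upstream patch): generate a fuzzed
+# image with a raw backing file, restricting fuzzing to the header in general
+# and to the 'version' field in particular; the file names are illustrative.
+#
+#     create_image('test.img', backing_file_name='backing_img.raw',
+#                  backing_file_fmt='raw',
+#                  fields_to_fuzz=[['header'], ['header', 'version']])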
diff --git a/qemu/tests/image-fuzzer/runner.py b/qemu/tests/image-fuzzer/runner.py
new file mode 100755
index 000000000..0a8743ef4
--- /dev/null
+++ b/qemu/tests/image-fuzzer/runner.py
@@ -0,0 +1,437 @@
+#!/usr/bin/env python
+
+# Tool for running fuzz tests
+#
+# Copyright (C) 2014 Maria Kustova <maria.k@catit.be>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+import os
+import signal
+import subprocess
+import random
+import shutil
+from itertools import count
+import time
+import getopt
+import StringIO
+import resource
+
+try:
+ import json
+except ImportError:
+ try:
+ import simplejson as json
+ except ImportError:
+ print >>sys.stderr, \
+ "Warning: Module for JSON processing is not found.\n" \
+ "'--config' and '--command' options are not supported."
+
+# Backing file sizes in MB
+MAX_BACKING_FILE_SIZE = 10
+MIN_BACKING_FILE_SIZE = 1
+
+
+def multilog(msg, *output):
+ """ Write an object to all of specified file descriptors."""
+ for fd in output:
+ fd.write(msg)
+ fd.flush()
+
+
+def str_signal(sig):
+ """ Convert a numeric value of a system signal to the string one
+ defined by the current operational system.
+ """
+ for k, v in signal.__dict__.items():
+ if v == sig:
+ return k
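+
+# Editorial usage sketch (not part of the upstream patch): on a typical Linux
+# host the lookup below yields the signal name, e.g. 'SIGKILL' for 9; None is
+# returned if no signal constant matches the value.
+#
+#     str_signal(9)    # 'SIGKILL'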
+
+
+def run_app(fd, q_args):
+ """Start an application with specified arguments and return its exit code
+ or kill signal depending on the result of execution.
+ """
+
+ class Alarm(Exception):
+ """Exception for signal.alarm events."""
+ pass
+
+ def handler(*args):
+ """Notify that an alarm event occurred."""
+ raise Alarm
+
+ signal.signal(signal.SIGALRM, handler)
+ signal.alarm(600)
+ term_signal = signal.SIGKILL
+ devnull = open('/dev/null', 'r+')
+ process = subprocess.Popen(q_args, stdin=devnull,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ try:
+ out, err = process.communicate()
+ signal.alarm(0)
+ fd.write(out)
+ fd.write(err)
+ fd.flush()
+ return process.returncode
+
+ except Alarm:
+ os.kill(process.pid, term_signal)
+ fd.write('The command was terminated by timeout.\n')
+ fd.flush()
+ return -term_signal
+
+
+class TestException(Exception):
+ """Exception for errors risen by TestEnv objects."""
+ pass
+
+
+class TestEnv(object):
+
+ """Test object.
+
+    The class sets up the test environment, generates backing and test images
+    and executes the application under test with the specified arguments and
+    the generated test image.
+
+ All logs are collected.
+
+ The summary log will contain short descriptions and statuses of tests in
+ a run.
+
+    The test log will include application (e.g. 'qemu-img') output in addition
+    to the info sent to the summary log.
+ """
+
+ def __init__(self, test_id, seed, work_dir, run_log,
+ cleanup=True, log_all=False):
+ """Set test environment in a specified work directory.
+
+ Path to qemu-img and qemu-io will be retrieved from 'QEMU_IMG' and
+ 'QEMU_IO' environment variables.
+ """
+ if seed is not None:
+ self.seed = seed
+ else:
+ self.seed = str(random.randint(0, sys.maxint))
+ random.seed(self.seed)
+
+ self.init_path = os.getcwd()
+ self.work_dir = work_dir
+ self.current_dir = os.path.join(work_dir, 'test-' + test_id)
+ self.qemu_img = \
+ os.environ.get('QEMU_IMG', 'qemu-img').strip().split(' ')
+ self.qemu_io = os.environ.get('QEMU_IO', 'qemu-io').strip().split(' ')
+ self.commands = [['qemu-img', 'check', '-f', 'qcow2', '$test_img'],
+ ['qemu-img', 'info', '-f', 'qcow2', '$test_img'],
+ ['qemu-io', '$test_img', '-c', 'read $off $len'],
+ ['qemu-io', '$test_img', '-c', 'write $off $len'],
+ ['qemu-io', '$test_img', '-c',
+ 'aio_read $off $len'],
+ ['qemu-io', '$test_img', '-c',
+ 'aio_write $off $len'],
+ ['qemu-io', '$test_img', '-c', 'flush'],
+ ['qemu-io', '$test_img', '-c',
+ 'discard $off $len'],
+ ['qemu-io', '$test_img', '-c',
+ 'truncate $off']]
+ for fmt in ['raw', 'vmdk', 'vdi', 'qcow2', 'file', 'qed', 'vpc']:
+ self.commands.append(
+ ['qemu-img', 'convert', '-f', 'qcow2', '-O', fmt,
+ '$test_img', 'converted_image.' + fmt])
+
+ try:
+ os.makedirs(self.current_dir)
+ except OSError, e:
+ print >>sys.stderr, \
+ "Error: The working directory '%s' cannot be used. Reason: %s"\
+ % (self.work_dir, e[1])
+ raise TestException
+ self.log = open(os.path.join(self.current_dir, "test.log"), "w")
+ self.parent_log = open(run_log, "a")
+ self.failed = False
+ self.cleanup = cleanup
+ self.log_all = log_all
+
+ def _create_backing_file(self):
+ """Create a backing file in the current directory.
+
+ Return a tuple of a backing file name and format.
+
+ Format of a backing file is randomly chosen from all formats supported
+ by 'qemu-img create'.
+ """
+ # All formats supported by the 'qemu-img create' command.
+ backing_file_fmt = random.choice(['raw', 'vmdk', 'vdi', 'qcow2',
+ 'file', 'qed', 'vpc'])
+ backing_file_name = 'backing_img.' + backing_file_fmt
+ backing_file_size = random.randint(MIN_BACKING_FILE_SIZE,
+ MAX_BACKING_FILE_SIZE) * (1 << 20)
+ cmd = self.qemu_img + ['create', '-f', backing_file_fmt,
+ backing_file_name, str(backing_file_size)]
+ temp_log = StringIO.StringIO()
+ retcode = run_app(temp_log, cmd)
+ if retcode == 0:
+ temp_log.close()
+ return (backing_file_name, backing_file_fmt)
+ else:
+ multilog("Warning: The %s backing file was not created.\n\n"
+ % backing_file_fmt, sys.stderr, self.log, self.parent_log)
+ self.log.write("Log for the failure:\n" + temp_log.getvalue() +
+ '\n\n')
+ temp_log.close()
+ return (None, None)
+
+ def execute(self, input_commands=None, fuzz_config=None):
+ """ Execute a test.
+
+ The method creates backing and test images, runs test app and analyzes
+ its exit status. If the application was killed by a signal, the test
+ is marked as failed.
+ """
+ if input_commands is None:
+ commands = self.commands
+ else:
+ commands = input_commands
+
+ os.chdir(self.current_dir)
+ backing_file_name, backing_file_fmt = self._create_backing_file()
+ img_size = image_generator.create_image(
+ 'test.img', backing_file_name, backing_file_fmt, fuzz_config)
+ for item in commands:
+ shutil.copy('test.img', 'copy.img')
+            # 'off' and 'len' are multiples of the sector size
+ sector_size = 512
+ start = random.randrange(0, img_size + 1, sector_size)
+ end = random.randrange(start, img_size + 1, sector_size)
+
+ if item[0] == 'qemu-img':
+ current_cmd = list(self.qemu_img)
+ elif item[0] == 'qemu-io':
+ current_cmd = list(self.qemu_io)
+ else:
+ multilog("Warning: test command '%s' is not defined.\n"
+ % item[0], sys.stderr, self.log, self.parent_log)
+ continue
+ # Replace all placeholders with their real values
+ for v in item[1:]:
+ c = (v
+ .replace('$test_img', 'copy.img')
+ .replace('$off', str(start))
+ .replace('$len', str(end - start)))
+ current_cmd.append(c)
+
+ # Log string with the test header
+ test_summary = "Seed: %s\nCommand: %s\nTest directory: %s\n" \
+ "Backing file: %s\n" \
+ % (self.seed, " ".join(current_cmd),
+ self.current_dir, backing_file_name)
+ temp_log = StringIO.StringIO()
+ try:
+ retcode = run_app(temp_log, current_cmd)
+ except OSError, e:
+ multilog("%sError: Start of '%s' failed. Reason: %s\n\n"
+ % (test_summary, os.path.basename(current_cmd[0]),
+ e[1]),
+ sys.stderr, self.log, self.parent_log)
+ raise TestException
+
+ if retcode < 0:
+ self.log.write(temp_log.getvalue())
+ multilog("%sFAIL: Test terminated by signal %s\n\n"
+ % (test_summary, str_signal(-retcode)),
+ sys.stderr, self.log, self.parent_log)
+ self.failed = True
+ else:
+ if self.log_all:
+ self.log.write(temp_log.getvalue())
+ multilog("%sPASS: Application exited with the code " \
+ "'%d'\n\n" % (test_summary, retcode),
+ sys.stdout, self.log, self.parent_log)
+ temp_log.close()
+ os.remove('copy.img')
+
+ def finish(self):
+ """Restore the test environment after a test execution."""
+ self.log.close()
+ self.parent_log.close()
+ os.chdir(self.init_path)
+ if self.cleanup and not self.failed:
+ shutil.rmtree(self.current_dir)
+
+if __name__ == '__main__':
+
+ def usage():
+ print """
+ Usage: runner.py [OPTION...] TEST_DIR IMG_GENERATOR
+
+ Set up test environment in TEST_DIR and run a test in it. A module for
+ test image generation should be specified via IMG_GENERATOR.
+
+ Example:
+ runner.py -c '[["qemu-img", "info", "$test_img"]]' /tmp/test qcow2
+
+ Optional arguments:
+ -h, --help display this help and exit
+ -d, --duration=NUMBER finish tests after NUMBER of seconds
+ -c, --command=JSON run tests for all commands specified in
+ the JSON array
+ -s, --seed=STRING seed for a test image generation,
+ by default will be generated randomly
+ --config=JSON take fuzzer configuration from the JSON
+ array
+ -k, --keep_passed don't remove folders of passed tests
+ -v, --verbose log information about passed tests
+
+ JSON:
+
+    '--command' accepts a JSON array of commands. Each command represents
+    an application under test with all its parameters as a list of strings,
+ e.g. ["qemu-io", "$test_img", "-c", "write $off $len"].
+
+ Supported application aliases: 'qemu-img' and 'qemu-io'.
+
+ Supported argument aliases: $test_img for the fuzzed image, $off
+ for an offset, $len for length.
+
+ Values for $off and $len will be generated based on the virtual disk
+ size of the fuzzed image.
+
+    Paths to 'qemu-img' and 'qemu-io' are retrieved from 'QEMU_IMG' and
+ 'QEMU_IO' environment variables.
+
+ '--config' accepts a JSON array of fields to be fuzzed, e.g.
+ '[["header"], ["header", "version"]]'.
+
+    Each list element can name either a complex image element, e.g.
+    ["header"] or ["feature_name_table"], or an exact field, e.g.
+    ["header", "version"]. In the first case a random subset of the
+    element's fields will be fuzzed; in the second the specified field
+    will always be fuzzed.
+
+ If '--config' argument is specified, fields not listed in
+ the configuration array will not be fuzzed.
+ """
+
+ def run_test(test_id, seed, work_dir, run_log, cleanup, log_all,
+ command, fuzz_config):
+ """Setup environment for one test and execute this test."""
+ try:
+ test = TestEnv(test_id, seed, work_dir, run_log, cleanup,
+ log_all)
+ except TestException:
+ sys.exit(1)
+
+ # Python 2.4 doesn't support 'finally' and 'except' in the same 'try'
+ # block
+ try:
+ try:
+ test.execute(command, fuzz_config)
+ except TestException:
+ sys.exit(1)
+ finally:
+ test.finish()
+
+ def should_continue(duration, start_time):
+ """Return True if a new test can be started and False otherwise."""
+ current_time = int(time.time())
+ return (duration is None) or (current_time - start_time < duration)
+
+ try:
+ opts, args = getopt.gnu_getopt(sys.argv[1:], 'c:hs:kvd:',
+ ['command=', 'help', 'seed=', 'config=',
+ 'keep_passed', 'verbose', 'duration='])
+ except getopt.error, e:
+ print >>sys.stderr, \
+ "Error: %s\n\nTry 'runner.py --help' for more information" % e
+ sys.exit(1)
+
+ command = None
+ cleanup = True
+ log_all = False
+ seed = None
+ config = None
+ duration = None
+ for opt, arg in opts:
+ if opt in ('-h', '--help'):
+ usage()
+ sys.exit()
+ elif opt in ('-c', '--command'):
+ try:
+ command = json.loads(arg)
+ except (TypeError, ValueError, NameError), e:
+ print >>sys.stderr, \
+ "Error: JSON array of test commands cannot be loaded.\n" \
+ "Reason: %s" % e
+ sys.exit(1)
+ elif opt in ('-k', '--keep_passed'):
+ cleanup = False
+ elif opt in ('-v', '--verbose'):
+ log_all = True
+ elif opt in ('-s', '--seed'):
+ seed = arg
+ elif opt in ('-d', '--duration'):
+ duration = int(arg)
+ elif opt == '--config':
+ try:
+ config = json.loads(arg)
+ except (TypeError, ValueError, NameError), e:
+ print >>sys.stderr, \
+ "Error: JSON array with the fuzzer configuration cannot" \
+ " be loaded\nReason: %s" % e
+ sys.exit(1)
+
+ if not len(args) == 2:
+ print >>sys.stderr, \
+ "Expected two parameters\nTry 'runner.py --help'" \
+ " for more information."
+ sys.exit(1)
+
+ work_dir = os.path.realpath(args[0])
+ # run_log is created in 'main', because multiple tests are expected to
+    # write to it
+ run_log = os.path.join(work_dir, 'run.log')
+
+ # Add the path to the image generator module to sys.path
+ sys.path.append(os.path.realpath(os.path.dirname(args[1])))
+ # Remove a script extension from image generator module if any
+ generator_name = os.path.splitext(os.path.basename(args[1]))[0]
+
+ try:
+ image_generator = __import__(generator_name)
+ except ImportError, e:
+ print >>sys.stderr, \
+ "Error: The image generator '%s' cannot be imported.\n" \
+ "Reason: %s" % (generator_name, e)
+ sys.exit(1)
+
+ # Enable core dumps
+ resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
+ # If a seed is specified, only one test will be executed.
+    # Otherwise the runner will run until it is interrupted from the keyboard
+ start_time = int(time.time())
+ test_id = count(1)
+ while should_continue(duration, start_time):
+ try:
+ run_test(str(test_id.next()), seed, work_dir, run_log, cleanup,
+ log_all, command, config)
+ except (KeyboardInterrupt, SystemExit):
+ sys.exit(1)
+
+ if seed is not None:
+ break