Diffstat (limited to 'qemu/include/migration')
-rw-r--r--  qemu/include/migration/migration.h     137
-rw-r--r--  qemu/include/migration/postcopy-ram.h   99
-rw-r--r--  qemu/include/migration/qemu-file.h      29
-rw-r--r--  qemu/include/migration/vmstate.h       142
4 files changed, 375 insertions(+), 32 deletions(-)
diff --git a/qemu/include/migration/migration.h b/qemu/include/migration/migration.h
index 83346210b..ac2c12c2a 100644
--- a/qemu/include/migration/migration.h
+++ b/qemu/include/migration/migration.h
@@ -18,7 +18,6 @@
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
-#include "qapi/error.h"
#include "migration/vmstate.h"
#include "qapi-types.h"
#include "exec/cpu-common.h"
@@ -35,6 +34,7 @@
#define QEMU_VM_SUBSECTION 0x05
#define QEMU_VM_VMDESCRIPTION 0x06
#define QEMU_VM_CONFIGURATION 0x07
+#define QEMU_VM_COMMAND 0x08
#define QEMU_VM_SECTION_FOOTER 0x7e
struct MigrationParams {
@@ -42,14 +42,71 @@ struct MigrationParams {
bool shared;
};
-typedef struct MigrationState MigrationState;
+/* Messages sent on the return path from destination to source */
+enum mig_rp_message_type {
+ MIG_RP_MSG_INVALID = 0, /* Must be 0 */
+ MIG_RP_MSG_SHUT, /* sibling will not send any more RP messages */
+ MIG_RP_MSG_PONG, /* Response to a PING; data (seq: be32 ) */
+
+ MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
+ MIG_RP_MSG_REQ_PAGES, /* data (start: be64, len: be32) */
+
+ MIG_RP_MSG_MAX
+};
typedef QLIST_HEAD(, LoadStateEntry) LoadStateEntry_Head;
+/* The current postcopy state is read/set by postcopy_state_get/set
+ * which update it atomically.
+ * The state is updated as postcopy messages are received, and
+ * in general only one thread should be writing to the state at any one
+ * time, initially the main thread and then the listen thread;
+ * Corner cases arise when either thread finishes early and/or hits an error.
+ * The state is checked as messages are received to ensure that
+ * the source is sending us messages in the correct order.
+ * The state is also used by the RAM reception code to know if it
+ * has to place pages atomically, and the cleanup code at the end of
+ * the main thread to know if it has to delay cleanup until the end
+ * of postcopy.
+ */
+typedef enum {
+ POSTCOPY_INCOMING_NONE = 0, /* Initial state - no postcopy */
+ POSTCOPY_INCOMING_ADVISE,
+ POSTCOPY_INCOMING_DISCARD,
+ POSTCOPY_INCOMING_LISTENING,
+ POSTCOPY_INCOMING_RUNNING,
+ POSTCOPY_INCOMING_END
+} PostcopyState;
+
/* State for the incoming migration */
struct MigrationIncomingState {
- QEMUFile *file;
+ QEMUFile *from_src_file;
+
+ /*
+ * Free at the start of the main state load, set as the main thread finishes
+ * loading state.
+ */
+ QemuEvent main_thread_load_event;
+
+ bool have_fault_thread;
+ QemuThread fault_thread;
+ QemuSemaphore fault_thread_sem;
+ bool have_listen_thread;
+ QemuThread listen_thread;
+ QemuSemaphore listen_thread_sem;
+
+ /* For the kernel to send us notifications */
+ int userfault_fd;
+ /* To tell the fault_thread to quit */
+ int userfault_quit_fd;
+ QEMUFile *to_src_file;
+ QemuMutex rp_mutex; /* We send replies from multiple threads */
+ void *postcopy_tmp_page;
+
+ QEMUBH *bh;
+
+ int state;
/* See savevm.c */
LoadStateEntry_Head loadvm_handlers;
};
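The to_src_file/rp_mutex pair above is what the return-path senders declared further down in this header serialise on. A minimal sketch of how the generic sender might look, assuming a simple be16 type / be16 length framing (an assumption; only the per-message payload layouts are documented in mig_rp_message_type above) — the real implementation lives in migration/migration.c:

    /* Sketch only: send one return-path message under rp_mutex. */
    void migrate_send_rp_message(MigrationIncomingState *mis,
                                 enum mig_rp_message_type message_type,
                                 uint16_t len, void *data)
    {
        qemu_mutex_lock(&mis->rp_mutex);   /* replies come from several threads */
        qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
        qemu_put_be16(mis->to_src_file, len);
        qemu_put_buffer(mis->to_src_file, data, len);
        qemu_fflush(mis->to_src_file);
        qemu_mutex_unlock(&mis->rp_mutex);
    }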
@@ -58,6 +115,18 @@ MigrationIncomingState *migration_incoming_get_current(void);
MigrationIncomingState *migration_incoming_state_new(QEMUFile *f);
void migration_incoming_state_destroy(void);
+/*
+ * An outstanding page request, on the source, having been received
+ * and queued
+ */
+struct MigrationSrcPageRequest {
+ RAMBlock *rb;
+ hwaddr offset;
+ hwaddr len;
+
+ QSIMPLEQ_ENTRY(MigrationSrcPageRequest) next_req;
+};
+
struct MigrationState
{
int64_t bandwidth_limit;
@@ -65,23 +134,47 @@ struct MigrationState
size_t xfer_limit;
QemuThread thread;
QEMUBH *cleanup_bh;
- QEMUFile *file;
- int parameters[MIGRATION_PARAMETER_MAX];
+ QEMUFile *to_dst_file;
+ int parameters[MIGRATION_PARAMETER__MAX];
int state;
MigrationParams params;
+
+ /* State related to return path */
+ struct {
+ QEMUFile *from_dst_file;
+ QemuThread rp_thread;
+ bool error;
+ } rp_state;
+
double mbps;
int64_t total_time;
int64_t downtime;
int64_t expected_downtime;
int64_t dirty_pages_rate;
int64_t dirty_bytes_rate;
- bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
+ bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
int64_t xbzrle_cache_size;
int64_t setup_time;
int64_t dirty_sync_count;
+
+ /* Flag set once the migration has been asked to enter postcopy */
+ bool start_postcopy;
+ /* Flag set after postcopy has sent the device state */
+ bool postcopy_after_devices;
+
+ /* Flag set once the migration thread is running (and needs joining) */
+ bool migration_thread_running;
+
+ /* Queue of outstanding page requests from the destination */
+ QemuMutex src_page_req_mutex;
+ QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
+ /* The RAMBlock used in the last src_page_request */
+ RAMBlock *last_req_rb;
};
+void migrate_set_state(int *state, int old_state, int new_state);
+
void process_incoming_migration(QEMUFile *f);
void qemu_start_incoming_migration(const char *uri, Error **errp);
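The MigrationSrcPageRequest entries above feed the src_page_requests queue in MigrationState, which ram_save_queue_pages() (declared near the end of this header) fills on behalf of the destination. A minimal sketch of the enqueue step, assuming the caller has already resolved the block name to a RAMBlock and validated the range; the real logic, including the last_req_rb fast path, lives elsewhere in the migration code:

    /* Sketch only: queue one page request for the migration thread to service. */
    static void example_queue_page_request(MigrationState *ms, RAMBlock *rb,
                                           ram_addr_t start, ram_addr_t len)
    {
        struct MigrationSrcPageRequest *req =
            g_new0(struct MigrationSrcPageRequest, 1);

        req->rb = rb;
        req->offset = start;
        req->len = len;

        qemu_mutex_lock(&ms->src_page_req_mutex);
        QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, req, next_req);
        qemu_mutex_unlock(&ms->src_page_req_mutex);
    }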
@@ -116,9 +209,14 @@ int migrate_fd_close(MigrationState *s);
void add_migration_state_change_notifier(Notifier *notify);
void remove_migration_state_change_notifier(Notifier *notify);
+MigrationState *migrate_init(const MigrationParams *params);
bool migration_in_setup(MigrationState *);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
+/* True if outgoing migration has entered postcopy phase */
+bool migration_in_postcopy(MigrationState *);
+/* ...and after the device transmission */
+bool migration_in_postcopy_after_devices(MigrationState *);
MigrationState *migrate_get_current(void);
void migrate_compress_threads_create(void);
@@ -145,6 +243,13 @@ uint64_t xbzrle_mig_pages_cache_miss(void);
double xbzrle_mig_cache_miss_rate(void);
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
+/* For outgoing discard bitmap */
+int ram_postcopy_send_discard_bitmap(MigrationState *ms);
+/* For incoming postcopy discard */
+int ram_discard_range(MigrationIncomingState *mis, const char *block_name,
+ uint64_t start, size_t length);
+int ram_postcopy_incoming_init(MigrationIncomingState *mis);
/**
* @migrate_add_blocker - prevent migration from proceeding
@@ -160,6 +265,7 @@ void migrate_add_blocker(Error *reason);
*/
void migrate_del_blocker(Error *reason);
+bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);
bool migrate_auto_converge(void);
@@ -179,6 +285,17 @@ int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
+/* Sending on the return path - generic and then for each message type */
+void migrate_send_rp_message(MigrationIncomingState *mis,
+ enum mig_rp_message_type message_type,
+ uint16_t len, void *data);
+void migrate_send_rp_shut(MigrationIncomingState *mis,
+ uint32_t value);
+void migrate_send_rp_pong(MigrationIncomingState *mis,
+ uint32_t value);
+void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
+ ram_addr_t start, size_t len);
+
void ram_control_before_iterate(QEMUFile *f, uint64_t flags);
void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);
@@ -204,4 +321,12 @@ void global_state_set_optional(void);
void savevm_skip_configuration(void);
int global_state_store(void);
void global_state_store_running(void);
+
+void flush_page_queue(MigrationState *ms);
+int ram_save_queue_pages(MigrationState *ms, const char *rbname,
+ ram_addr_t start, ram_addr_t len);
+
+PostcopyState postcopy_state_get(void);
+/* Set the state and return the old state */
+PostcopyState postcopy_state_set(PostcopyState new_state);
#endif
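postcopy_state_get()/postcopy_state_set() are the atomic accessors that the long PostcopyState comment near the top of this file refers to. A minimal sketch of what their definitions might look like, assuming QEMU's atomic helpers from qemu/atomic.h; the actual definitions live in the migration code, not in this header:

    /* Sketch only: one global incoming-side state word, updated atomically. */
    static PostcopyState incoming_postcopy_state;

    PostcopyState postcopy_state_get(void)
    {
        return atomic_mb_read(&incoming_postcopy_state);
    }

    /* Returns the old state so callers can check the transition was legal. */
    PostcopyState postcopy_state_set(PostcopyState new_state)
    {
        return atomic_xchg(&incoming_postcopy_state, new_state);
    }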
diff --git a/qemu/include/migration/postcopy-ram.h b/qemu/include/migration/postcopy-ram.h
new file mode 100644
index 000000000..b6a7491f2
--- /dev/null
+++ b/qemu/include/migration/postcopy-ram.h
@@ -0,0 +1,99 @@
+/*
+ * Postcopy migration for RAM
+ *
+ * Copyright 2013 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Dave Gilbert <dgilbert@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef QEMU_POSTCOPY_RAM_H
+#define QEMU_POSTCOPY_RAM_H
+
+/* Return true if the host supports everything we need to do postcopy-ram */
+bool postcopy_ram_supported_by_host(void);
+
+/*
+ * Make all of RAM sensitive to accesses to areas that haven't yet been written
+ * and wire up anything necessary to deal with it.
+ */
+int postcopy_ram_enable_notify(MigrationIncomingState *mis);
+
+/*
+ * Initialise postcopy-ram, setting the RAM to a state where we can go into
+ * postcopy later; must be called prior to any precopy.
+ * Called from ram.c's similarly named ram_postcopy_incoming_init().
+ */
+int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages);
+
+/*
+ * At the end of a migration where postcopy_ram_incoming_init was called.
+ */
+int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis);
+
+/*
+ * Discard the contents of 'length' bytes from 'start'.
+ * We can assume that if we've been called, postcopy_ram_supported_by_host()
+ * returned true.
+ */
+int postcopy_ram_discard_range(MigrationIncomingState *mis, uint8_t *start,
+ size_t length);
+
+/*
+ * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
+ * however, leaving that until after precopy means that most of the precopy
+ * data is still THP-backed.
+ */
+int postcopy_ram_prepare_discard(MigrationIncomingState *mis);
+
+/*
+ * Called at the start of each RAMBlock by the bitmap code.
+ * 'offset' is the bitmap offset of the named RAMBlock in the migration
+ * bitmap.
+ * Returns a new PDS
+ */
+PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms,
+ unsigned long offset,
+ const char *name);
+
+/*
+ * Called by the bitmap code for each chunk to discard.
+ * May send a discard message, may just leave it queued to
+ * be sent later.
+ * @start,@length: a range of pages in the migration bitmap in the
+ * RAM block passed to postcopy_discard_send_init() (length=1 is one page)
+ */
+void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
+ unsigned long start, unsigned long length);
+
+/*
+ * Called at the end of each RAMBlock by the bitmap code.
+ * Sends any outstanding discard messages, frees the PDS.
+ */
+void postcopy_discard_send_finish(MigrationState *ms,
+ PostcopyDiscardState *pds);
+
+/*
+ * Place a page (from) at (host) efficiently.
+ * There are restrictions on how 'from' must be mapped; in general it is best
+ * to use the other postcopy_* routines to allocate it.
+ * returns 0 on success
+ */
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from);
+
+/*
+ * Place a zero page at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host);
+
+/*
+ * Allocate a page of memory that can be mapped at a later point in time
+ * using postcopy_place_page
+ * Returns: Pointer to allocated page
+ */
+void *postcopy_get_tmp_page(MigrationIncomingState *mis);
+
+#endif
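Taken together, the three postcopy_discard_send_* calls above form a per-RAMBlock sequence: init once, send any number of ranges, then finish to flush and free. A minimal usage sketch, with the bitmap walk (which really lives in ram.c's ram_postcopy_send_discard_bitmap path) reduced to a single range; start and npages are in pages of the migration bitmap:

    /* Sketch only: discard one run of pages from one RAMBlock. */
    static void example_discard_one_block(MigrationState *ms, const char *name,
                                          unsigned long bitmap_offset,
                                          unsigned long start,
                                          unsigned long npages)
    {
        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, bitmap_offset, name);

        /* In the real code this is called once per run of discardable pages. */
        postcopy_discard_send_range(ms, pds, start, npages);

        /* Flushes any queued discards and frees the PDS. */
        postcopy_discard_send_finish(ms, pds);
    }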
diff --git a/qemu/include/migration/qemu-file.h b/qemu/include/migration/qemu-file.h
index ea49f33fa..3f6b4ed58 100644
--- a/qemu/include/migration/qemu-file.h
+++ b/qemu/include/migration/qemu-file.h
@@ -25,21 +25,20 @@
#define QEMU_FILE_H 1
#include "exec/cpu-common.h"
-#include <stdint.h>
/* This function writes a chunk of data to a file at the given position.
* The pos argument can be ignored if the file is only being used for
* streaming. The handler should try to write all of the data it can.
*/
-typedef int (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf,
- int64_t pos, int size);
+typedef ssize_t (QEMUFilePutBufferFunc)(void *opaque, const uint8_t *buf,
+ int64_t pos, size_t size);
/* Read a chunk of data from a file at the given position. The pos argument
* can be ignored if the file is only being used for streaming. The number of
* bytes actually read should be returned.
*/
-typedef int (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf,
- int64_t pos, int size);
+typedef ssize_t (QEMUFileGetBufferFunc)(void *opaque, uint8_t *buf,
+ int64_t pos, size_t size);
/* Close a file
*
@@ -89,6 +88,11 @@ typedef size_t (QEMURamSaveFunc)(QEMUFile *f, void *opaque,
uint64_t *bytes_sent);
/*
+ * Return a QEMUFile for comms in the opposite direction
+ */
+typedef QEMUFile *(QEMURetPathFunc)(void *opaque);
+
+/*
* Stop any read or write (depending on flags) on the underlying
* transport on the QEMUFile.
* Existing blocking reads/writes must be woken
@@ -106,6 +110,7 @@ typedef struct QEMUFileOps {
QEMURamHookFunc *after_ram_iterate;
QEMURamHookFunc *hook_ram_load;
QEMURamSaveFunc *save_page;
+ QEMURetPathFunc *get_return_path;
QEMUFileShutdownFunc *shut_down;
} QEMUFileOps;
@@ -126,13 +131,13 @@ int qemu_get_fd(QEMUFile *f);
int qemu_fclose(QEMUFile *f);
int64_t qemu_ftell(QEMUFile *f);
int64_t qemu_ftell_fast(QEMUFile *f);
-void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, int size);
+void qemu_put_buffer(QEMUFile *f, const uint8_t *buf, size_t size);
void qemu_put_byte(QEMUFile *f, int v);
/*
* put_buffer without copying the buffer.
* The buffer should be available till it is sent asynchronously.
*/
-void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, int size);
+void qemu_put_buffer_async(QEMUFile *f, const uint8_t *buf, size_t size);
bool qemu_file_mode_is_not_valid(const char *mode);
bool qemu_file_is_writable(QEMUFile *f);
@@ -161,11 +166,13 @@ static inline void qemu_put_ubyte(QEMUFile *f, unsigned int v)
void qemu_put_be16(QEMUFile *f, unsigned int v);
void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
-int qemu_peek_buffer(QEMUFile *f, uint8_t **buf, int size, size_t offset);
-int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size);
+size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
+size_t qemu_get_buffer(QEMUFile *f, uint8_t *buf, size_t size);
+size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
ssize_t qemu_put_compression_data(QEMUFile *f, const uint8_t *p, size_t size,
int level);
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
+
/*
* Note that you can only peek continuous bytes from where the current pointer
* is; you aren't guaranteed to be able to peek to +n bytes unless you've
@@ -194,7 +201,9 @@ int64_t qemu_file_get_rate_limit(QEMUFile *f);
int qemu_file_get_error(QEMUFile *f);
void qemu_file_set_error(QEMUFile *f, int ret);
int qemu_file_shutdown(QEMUFile *f);
+QEMUFile *qemu_file_get_return_path(QEMUFile *f);
void qemu_fflush(QEMUFile *f);
+void qemu_file_set_blocking(QEMUFile *f, bool block);
static inline void qemu_put_be64s(QEMUFile *f, const uint64_t *pv)
{
@@ -237,7 +246,7 @@ static inline void qemu_get_8s(QEMUFile *f, uint8_t *pv)
}
// Signed versions for type safety
-static inline void qemu_put_sbuffer(QEMUFile *f, const int8_t *buf, int size)
+static inline void qemu_put_sbuffer(QEMUFile *f, const int8_t *buf, size_t size)
{
qemu_put_buffer(f, (const uint8_t *)buf, size);
}
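The new get_return_path hook and qemu_file_get_return_path() are how the destination obtains the to_src_file used for the return-path messages in migration.h. A minimal sketch of how a bidirectional transport might wire it up; example_socket_get_return_path and example_socket_ops are hypothetical names, not existing QEMU symbols:

    /* Sketch only: a transport whose channel can also be read in reverse. */
    static QEMUFile *example_socket_get_return_path(void *opaque)
    {
        /* A real implementation would wrap the same fd with ops that read in
         * the opposite direction and return the resulting QEMUFile. */
        return NULL;
    }

    static const QEMUFileOps example_socket_ops = {
        .get_return_path = example_socket_get_return_path,
        /* ...put_buffer/get_buffer/close as before... */
    };

    /* Callers ask the file itself; transports without the hook presumably
     * yield no return path, so the result must be checked:
     *     QEMUFile *rp = qemu_file_get_return_path(to_dst_file);
     */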
diff --git a/qemu/include/migration/vmstate.h b/qemu/include/migration/vmstate.h
index 2e5a97dec..84ee355ce 100644
--- a/qemu/include/migration/vmstate.h
+++ b/qemu/include/migration/vmstate.h
@@ -39,8 +39,9 @@ typedef struct SaveVMHandlers {
void (*set_params)(const MigrationParams *params, void * opaque);
SaveStateHandler *save_state;
- void (*cancel)(void *opaque);
- int (*save_live_complete)(QEMUFile *f, void *opaque);
+ void (*cleanup)(void *opaque);
+ int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
+ int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
/* This runs both outside and inside the iothread lock. */
bool (*is_active)(void *opaque);
@@ -54,8 +55,9 @@ typedef struct SaveVMHandlers {
/* This runs outside the iothread lock! */
int (*save_live_setup)(QEMUFile *f, void *opaque);
- uint64_t (*save_live_pending)(QEMUFile *f, void *opaque, uint64_t max_size);
-
+ void (*save_live_pending)(QEMUFile *f, void *opaque, uint64_t max_size,
+ uint64_t *non_postcopiable_pending,
+ uint64_t *postcopiable_pending);
LoadStateHandler *load_state;
} SaveVMHandlers;
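The reworked save_live_pending() no longer returns a single number: each handler now adds its estimate into two accumulators, split by whether the remaining state can be deferred to the postcopy phase. A minimal sketch of a handler honouring that contract; ExampleState and its fields are illustrative only:

    /* Sketch only: report pending state, split for postcopy decisions. */
    typedef struct ExampleState {
        uint64_t config_bytes_outstanding;  /* must be sent before switch-over */
        uint64_t ram_bytes_outstanding;     /* can be demand-fetched afterwards */
    } ExampleState;

    static void example_save_live_pending(QEMUFile *f, void *opaque,
                                          uint64_t max_size,
                                          uint64_t *non_postcopiable_pending,
                                          uint64_t *postcopiable_pending)
    {
        ExampleState *s = opaque;

        /* Accumulate: the core sums these across all registered handlers. */
        *non_postcopiable_pending += s->config_bytes_outstanding;
        *postcopiable_pending += s->ram_bytes_outstanding;
    }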
@@ -86,20 +88,101 @@ struct VMStateInfo {
};
enum VMStateFlags {
+ /* Ignored */
VMS_SINGLE = 0x001,
+
+ /* The struct member at opaque + VMStateField.offset is a pointer
+ * to the actual field (e.g. struct a { uint8_t *b;
+ * }). Dereference the pointer before using it as basis for
+ * further pointer arithmetic (see e.g. VMS_ARRAY). Does not
+ * affect the meaning of VMStateField.num_offset or
+ * VMStateField.size_offset; see VMS_VARRAY* and VMS_VBUFFER for
+ * those. */
VMS_POINTER = 0x002,
+
+ /* The field is an array of fixed size. VMStateField.num contains
+ * the number of entries in the array. The size of each entry is
+ * given by VMStateField.size and / or opaque +
+ * VMStateField.size_offset; see VMS_VBUFFER and
+ * VMS_MULTIPLY. Each array entry will be processed individually
+ * (VMStateField.info.get()/put() if VMS_STRUCT is not set,
+ * recursion into VMStateField.vmsd if VMS_STRUCT is set). May not
+ * be combined with VMS_VARRAY*. */
VMS_ARRAY = 0x004,
+
+ /* The field is itself a struct, containing one or more
+ * fields. Recurse into VMStateField.vmsd. Most useful in
+ * combination with VMS_ARRAY / VMS_VARRAY*, recursing into each
+ * array entry. */
VMS_STRUCT = 0x008,
- VMS_VARRAY_INT32 = 0x010, /* Array with size in int32_t field*/
- VMS_BUFFER = 0x020, /* static sized buffer */
+
+ /* The field is an array of variable size. The int32_t at opaque +
+ * VMStateField.num_offset contains the number of entries in the
+ * array. See the VMS_ARRAY description regarding array handling
+ * in general. May not be combined with VMS_ARRAY or any other
+ * VMS_VARRAY*. */
+ VMS_VARRAY_INT32 = 0x010,
+
+ /* Ignored */
+ VMS_BUFFER = 0x020,
+
+ /* The field is a (fixed-size or variable-size) array of pointers
+ * (e.g. struct a { uint8_t *b[]; }). Dereference each array entry
+ * before using it. Note: Does not imply any one of VMS_ARRAY /
+ * VMS_VARRAY*; these need to be set explicitly. */
VMS_ARRAY_OF_POINTER = 0x040,
- VMS_VARRAY_UINT16 = 0x080, /* Array with size in uint16_t field */
- VMS_VBUFFER = 0x100, /* Buffer with size in int32_t field */
- VMS_MULTIPLY = 0x200, /* multiply "size" field by field_size */
- VMS_VARRAY_UINT8 = 0x400, /* Array with size in uint8_t field*/
- VMS_VARRAY_UINT32 = 0x800, /* Array with size in uint32_t field*/
- VMS_MUST_EXIST = 0x1000, /* Field must exist in input */
- VMS_ALLOC = 0x2000, /* Alloc a buffer on the destination */
+
+ /* The field is an array of variable size. The uint16_t at opaque
+ * + VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS)
+ * contains the number of entries in the array. See the VMS_ARRAY
+ * description regarding array handling in general. May not be
+ * combined with VMS_ARRAY or any other VMS_VARRAY*. */
+ VMS_VARRAY_UINT16 = 0x080,
+
+ /* The size of the individual entries (a single array entry if
+ * VMS_ARRAY or any of VMS_VARRAY* are set, or the field itself if
+ * neither is set) is variable (i.e. not known at compile-time),
+ * but the same for all entries. Use the int32_t at opaque +
+ * VMStateField.size_offset (subject to VMS_MULTIPLY) to determine
+ * the size of each (and every) entry. */
+ VMS_VBUFFER = 0x100,
+
+ /* Multiply the entry size given by the int32_t at opaque +
+ * VMStateField.size_offset (see VMS_VBUFFER description) with
+ * VMStateField.size to determine the number of bytes to be
+ * allocated. Only valid in combination with VMS_VBUFFER. */
+ VMS_MULTIPLY = 0x200,
+
+ /* The field is an array of variable size. The uint8_t at opaque +
+ * VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS)
+ * contains the number of entries in the array. See the VMS_ARRAY
+ * description regarding array handling in general. May not be
+ * combined with VMS_ARRAY or any other VMS_VARRAY*. */
+ VMS_VARRAY_UINT8 = 0x400,
+
+ /* The field is an array of variable size. The uint32_t at opaque
+ * + VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS)
+ * contains the number of entries in the array. See the VMS_ARRAY
+ * description regarding array handling in general. May not be
+ * combined with VMS_ARRAY or any other VMS_VARRAY*. */
+ VMS_VARRAY_UINT32 = 0x800,
+
+ /* Fail loading the serialised VM state if this field is missing
+ * from the input. */
+ VMS_MUST_EXIST = 0x1000,
+
+ /* When loading serialised VM state, allocate memory for the
+ * (entire) field. Only valid in combination with
+ * VMS_POINTER. Note: Not all combinations with other flags are
+ * currently supported, e.g. VMS_ALLOC|VMS_ARRAY_OF_POINTER won't
+ * cause the individual entries to be allocated. */
+ VMS_ALLOC = 0x2000,
+
+ /* Multiply the number of entries given by the integer at opaque +
+ * VMStateField.num_offset (see VMS_VARRAY*) with VMStateField.num
+ * to determine the number of entries in the array. Only valid in
+ * combination with one of VMS_VARRAY*. */
+ VMS_MULTIPLY_ELEMENTS = 0x4000,
};
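As a concrete illustration of the flag semantics documented above: a heap-allocated array whose element count lives in a sibling field combines VMS_VARRAY_UINT32 with VMS_POINTER, and the pre-existing VMSTATE_VARRAY_UINT32 macro (not part of this diff) emits that pair. ExampleDev and its fields are illustrative only:

    /* Sketch only: nb_regs entries migrated from a heap-allocated array. */
    typedef struct ExampleDev {
        uint32_t nb_regs;
        uint32_t *regs;            /* allocated elsewhere, nb_regs elements */
    } ExampleDev;

    static const VMStateDescription vmstate_example_dev = {
        .name = "example-dev",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(nb_regs, ExampleDev),
            /* VMS_VARRAY_UINT32: count read from nb_regs;
             * VMS_POINTER: 'regs' is dereferenced before the walk. */
            VMSTATE_VARRAY_UINT32(regs, ExampleDev, nb_regs, 0,
                                  vmstate_info_uint32, uint32_t),
            VMSTATE_END_OF_LIST()
        },
    };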
typedef struct {
@@ -154,6 +237,7 @@ extern const VMStateInfo vmstate_info_uint32;
extern const VMStateInfo vmstate_info_uint64;
extern const VMStateInfo vmstate_info_float64;
+extern const VMStateInfo vmstate_info_cpudouble;
extern const VMStateInfo vmstate_info_timer;
extern const VMStateInfo vmstate_info_buffer;
@@ -243,6 +327,16 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = vmstate_offset_2darray(_state, _field, _type, _n1, _n2), \
}
+#define VMSTATE_VARRAY_MULTIPLY(_field, _state, _field_num, _multiply, _info, _type) { \
+ .name = (stringify(_field)), \
+ .num_offset = vmstate_offset_value(_state, _field_num, uint32_t),\
+ .num = (_multiply), \
+ .info = &(_info), \
+ .size = sizeof(_type), \
+ .flags = VMS_VARRAY_UINT32|VMS_MULTIPLY_ELEMENTS, \
+ .offset = offsetof(_state, _field), \
+}
+
#define VMSTATE_ARRAY_TEST(_field, _state, _num, _test, _info, _type) {\
.name = (stringify(_field)), \
.field_exists = (_test), \
@@ -382,6 +476,19 @@ extern const VMStateInfo vmstate_info_bitmap;
.offset = offsetof(_state, _field), \
}
+/* a variable length array (i.e. _type *_field) but we know the
+ * length
+ */
+#define VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(_field, _state, _num, _version, _vmsd, _type) { \
+ .name = (stringify(_field)), \
+ .num = (_num), \
+ .version_id = (_version), \
+ .vmsd = &(_vmsd), \
+ .size = sizeof(_type), \
+ .flags = VMS_STRUCT|VMS_ARRAY|VMS_POINTER, \
+ .offset = offsetof(_state, _field), \
+}
+
#define VMSTATE_STRUCT_VARRAY_POINTER_INT32(_field, _state, _field_num, _vmsd, _type) { \
.name = (stringify(_field)), \
.version_id = 0, \
@@ -754,9 +861,6 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_UINT32_SUB_ARRAY(_f, _s, _start, _num) \
VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint32, uint32_t)
-#define VMSTATE_UINT32_ARRAY(_f, _s, _n) \
- VMSTATE_UINT32_ARRAY_V(_f, _s, _n, 0)
-
#define VMSTATE_INT64_ARRAY_V(_f, _s, _n, _v) \
VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_int64, int64_t)
@@ -769,6 +873,12 @@ extern const VMStateInfo vmstate_info_bitmap;
#define VMSTATE_FLOAT64_ARRAY(_f, _s, _n) \
VMSTATE_FLOAT64_ARRAY_V(_f, _s, _n, 0)
+#define VMSTATE_CPUDOUBLE_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_cpudouble, CPU_DoubleU)
+
+#define VMSTATE_CPUDOUBLE_ARRAY(_f, _s, _n) \
+ VMSTATE_CPUDOUBLE_ARRAY_V(_f, _s, _n, 0)
+
#define VMSTATE_BUFFER_V(_f, _s, _v) \
VMSTATE_STATIC_BUFFER(_f, _s, _v, NULL, 0, sizeof(typeof_field(_s, _f)))
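A short illustration of the new VMSTATE_VARRAY_MULTIPLY macro and the VMS_MULTIPLY_ELEMENTS flag it sets: the element count on the wire is the uint32_t named by _field_num multiplied by the constant _multiply. ExamplePalette and its fields are illustrative only; note that the macro's offset refers to the array itself (no VMS_POINTER), so the field is an embedded array:

    /* Sketch only: num_entries * 3 colour components migrated from an
     * embedded array (no VMS_POINTER in VMSTATE_VARRAY_MULTIPLY). */
    #define EXAMPLE_MAX_ENTRIES 256

    typedef struct ExamplePalette {
        uint32_t num_entries;
        uint16_t components[EXAMPLE_MAX_ENTRIES * 3];
    } ExamplePalette;

    static const VMStateDescription vmstate_example_palette = {
        .name = "example-palette",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(num_entries, ExamplePalette),
            VMSTATE_VARRAY_MULTIPLY(components, ExamplePalette, num_entries, 3,
                                    vmstate_info_uint16, uint16_t),
            VMSTATE_END_OF_LIST()
        },
    };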