Diffstat (limited to 'qemu/include/exec')
-rw-r--r--  qemu/include/exec/cpu-all.h                    |  59
-rw-r--r--  qemu/include/exec/cpu-common.h                 |   6
-rw-r--r--  qemu/include/exec/cpu-defs.h                   |  26
-rw-r--r--  qemu/include/exec/cpu_ldst.h                   |  31
-rw-r--r--  qemu/include/exec/cpu_ldst_template.h          |  59
-rw-r--r--  qemu/include/exec/cpu_ldst_useronly_template.h |  25
-rw-r--r--  qemu/include/exec/cputlb.h                     |  16
-rw-r--r--  qemu/include/exec/exec-all.h                   | 213
-rw-r--r--  qemu/include/exec/gdbstub.h                    |  27
-rw-r--r--  qemu/include/exec/helper-head.h                |   1
-rw-r--r--  qemu/include/exec/log.h                        |  60
-rw-r--r--  qemu/include/exec/memattrs.h                   |   4
-rw-r--r--  qemu/include/exec/memory.h                     | 184
-rw-r--r--  qemu/include/exec/ram_addr.h                   | 276
-rw-r--r--  qemu/include/exec/softmmu-semi.h               |  18
-rw-r--r--  qemu/include/exec/spinlock.h                   |  49
-rw-r--r--  qemu/include/exec/user/thunk.h                 |   1
17 files changed, 747 insertions(+), 308 deletions(-)
diff --git a/qemu/include/exec/cpu-all.h b/qemu/include/exec/cpu-all.h
index ea6a9a667..08e5093d0 100644
--- a/qemu/include/exec/cpu-all.h
+++ b/qemu/include/exec/cpu-all.h
@@ -154,24 +154,16 @@ static inline void tswap64s(uint64_t *s)
/* MMU memory access macros */
#if defined(CONFIG_USER_ONLY)
-#include <assert.h>
#include "exec/user/abitypes.h"
/* On some host systems the guest address space is reserved on the host.
* This allows the guest address space to be offset to a convenient location.
*/
-#if defined(CONFIG_USE_GUEST_BASE)
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;
-#define GUEST_BASE guest_base
-#define RESERVED_VA reserved_va
-#else
-#define GUEST_BASE 0ul
-#define RESERVED_VA 0ul
-#endif
-#define GUEST_ADDR_MAX (RESERVED_VA ? RESERVED_VA : \
+#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \
(1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
#endif
@@ -181,11 +173,13 @@ extern unsigned long reserved_va;
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
-/* ??? These should be the larger of uintptr_t and target_ulong. */
+/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
+ * when intptr_t is 32-bit and we are aligning a long long.
+ */
extern uintptr_t qemu_real_host_page_size;
-extern uintptr_t qemu_real_host_page_mask;
+extern intptr_t qemu_real_host_page_mask;
extern uintptr_t qemu_host_page_size;
-extern uintptr_t qemu_host_page_mask;
+extern intptr_t qemu_host_page_mask;
#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
@@ -273,44 +267,6 @@ CPUArchState *cpu_copy(CPUArchState *env);
#if !defined(CONFIG_USER_ONLY)
-/* memory API */
-
-typedef struct RAMBlock RAMBlock;
-
-struct RAMBlock {
- struct rcu_head rcu;
- struct MemoryRegion *mr;
- uint8_t *host;
- ram_addr_t offset;
- ram_addr_t used_length;
- ram_addr_t max_length;
- void (*resized)(const char*, uint64_t length, void *host);
- uint32_t flags;
- /* Protected by iothread lock. */
- char idstr[256];
- /* RCU-enabled, writes protected by the ramlist lock */
- QLIST_ENTRY(RAMBlock) next;
- int fd;
-};
-
-static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
-{
- assert(offset < block->used_length);
- assert(block->host);
- return (char *)block->host + offset;
-}
-
-typedef struct RAMList {
- QemuMutex mutex;
- /* Protected by the iothread lock. */
- unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
- RAMBlock *mru_block;
- /* RCU-enabled, writes protected by the ramlist lock. */
- QLIST_HEAD(, RAMBlock) blocks;
- uint32_t version;
-} RAMList;
-extern RAMList ram_list;
-
/* Flags stored in the low bits of the TLB virtual address. These are
defined so that fast path ram access is all zeros. */
/* Zero if TLB entry is valid. */
@@ -323,9 +279,6 @@ extern RAMList ram_list;
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
-ram_addr_t last_ram_offset(void);
-void qemu_mutex_lock_ramlist(void);
-void qemu_mutex_unlock_ramlist(void);
#endif /* !CONFIG_USER_ONLY */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
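
The cpu-all.h changes above also switch qemu_*_page_mask from uintptr_t to intptr_t. A standalone sketch of why the signedness matters, modelling a 32-bit host with fixed-width types (illustrative only, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t addr = 0x100000234ULL;     /* an offset above 4 GiB */
        uint32_t page_size = 4096;

        /* model a 32-bit host: uintptr_t ~ uint32_t, intptr_t ~ int32_t */
        uint32_t umask = ~(uint32_t)(page_size - 1);
        int32_t  smask = ~(int32_t)(page_size - 1);

        /* zero-extension clears bits 32..63: prints 0x1000 */
        printf("unsigned mask: %#llx\n",
               (unsigned long long)((addr + page_size - 1) & umask));
        /* sign-extension preserves them: prints 0x100001000 */
        printf("signed mask:   %#llx\n",
               (unsigned long long)((addr + page_size - 1) & smask));
        return 0;
    }
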
diff --git a/qemu/include/exec/cpu-common.h b/qemu/include/exec/cpu-common.h
index 9fb1d541d..9e839e50c 100644
--- a/qemu/include/exec/cpu-common.h
+++ b/qemu/include/exec/cpu-common.h
@@ -14,7 +14,6 @@
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/fprintf-fn.h"
-#include "qemu/typedefs.h"
/**
* CPUListState:
@@ -54,7 +53,6 @@ typedef uintptr_t ram_addr_t;
#endif
extern ram_addr_t ram_size;
-ram_addr_t get_current_ram_size(void);
/* memory API */
@@ -64,8 +62,12 @@ typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+RAMBlock *qemu_ram_block_by_name(const char *name);
+RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
+ ram_addr_t *ram_addr, ram_addr_t *offset);
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(ram_addr_t addr);
+const char *qemu_ram_get_idstr(RAMBlock *rb);
void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
int len, int is_write);
diff --git a/qemu/include/exec/cpu-defs.h b/qemu/include/exec/cpu-defs.h
index 98b9cff31..854e7e356 100644
--- a/qemu/include/exec/cpu-defs.h
+++ b/qemu/include/exec/cpu-defs.h
@@ -23,9 +23,6 @@
#error cpu.h included from common code
#endif
-#include "config.h"
-#include <inttypes.h>
-#include "qemu/osdep.h"
#include "qemu/queue.h"
#include "tcg-target.h"
#ifndef CONFIG_USER_ONLY
@@ -105,17 +102,18 @@ typedef struct CPUTLBEntry {
bit 3 : indicates that the entry is invalid
bit 2..0 : zero
*/
- target_ulong addr_read;
- target_ulong addr_write;
- target_ulong addr_code;
- /* Addend to virtual address to get host address. IO accesses
- use the corresponding iotlb value. */
- uintptr_t addend;
- /* padding to get a power of two size */
- uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
- (sizeof(target_ulong) * 3 +
- ((-sizeof(target_ulong) * 3) & (sizeof(uintptr_t) - 1)) +
- sizeof(uintptr_t))];
+ union {
+ struct {
+ target_ulong addr_read;
+ target_ulong addr_write;
+ target_ulong addr_code;
+ /* Addend to virtual address to get host address. IO accesses
+ use the corresponding iotlb value. */
+ uintptr_t addend;
+ };
+ /* padding to get a power of two size */
+ uint8_t dummy[1 << CPU_TLB_ENTRY_BITS];
+ };
} CPUTLBEntry;
QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != (1 << CPU_TLB_ENTRY_BITS));
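
The CPUTLBEntry rewrite above swaps hand-computed tail padding for an anonymous union that overlays the payload with a byte array of the target size. A minimal compilable model of the idiom (not QEMU code; ENTRY_BITS stands in for CPU_TLB_ENTRY_BITS):

    #include <stdint.h>

    #define ENTRY_BITS 5                     /* stand-in for CPU_TLB_ENTRY_BITS */

    typedef struct Entry {
        union {
            struct {                         /* payload, must fit in the array */
                uint64_t addr_read;
                uint64_t addr_write;
                uint64_t addr_code;
            };
            uint8_t dummy[1 << ENTRY_BITS];  /* pads the size to 32 bytes */
        };
    } Entry;

    /* same check as QEMU_BUILD_BUG_ON(sizeof(CPUTLBEntry) != ...) */
    _Static_assert(sizeof(Entry) == (1 << ENTRY_BITS), "must stay a power of two");

    int main(void) { return 0; }
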
diff --git a/qemu/include/exec/cpu_ldst.h b/qemu/include/exec/cpu_ldst.h
index 1239c60f2..b573df53b 100644
--- a/qemu/include/exec/cpu_ldst.h
+++ b/qemu/include/exec/cpu_ldst.h
@@ -49,20 +49,20 @@
#if defined(CONFIG_USER_ONLY)
/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
-#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + GUEST_BASE))
+#define g2h(x) ((void *)((unsigned long)(target_ulong)(x) + guest_base))
#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define h2g_valid(x) 1
#else
#define h2g_valid(x) ({ \
- unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+ unsigned long __guest = (unsigned long)(x) - guest_base; \
(__guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS)) && \
- (!RESERVED_VA || (__guest < RESERVED_VA)); \
+ (!reserved_va || (__guest < reserved_va)); \
})
#endif
#define h2g_nocheck(x) ({ \
- unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
+ unsigned long __ret = (unsigned long)(x) - guest_base; \
(abi_ulong)__ret; \
})
@@ -113,25 +113,6 @@
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
-uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
-void helper_stb_mmu(CPUArchState *env, target_ulong addr,
- uint8_t val, int mmu_idx);
-void helper_stw_mmu(CPUArchState *env, target_ulong addr,
- uint16_t val, int mmu_idx);
-void helper_stl_mmu(CPUArchState *env, target_ulong addr,
- uint32_t val, int mmu_idx);
-void helper_stq_mmu(CPUArchState *env, target_ulong addr,
- uint64_t val, int mmu_idx);
-
-uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
-
#ifdef MMU_MODE0_SUFFIX
#define CPU_MMU_INDEX 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
@@ -363,7 +344,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#endif /* (NB_MMU_MODES > 12) */
/* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
#define MEMSUFFIX _data
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"
@@ -379,7 +360,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
#define MEMSUFFIX _code
#define SOFTMMU_CODE_ACCESS
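
Besides the cpu_mmu_index() signature change (the new bool distinguishes instruction fetch from data access), this file drops the GUEST_BASE/RESERVED_VA macros in favour of the plain globals. A standalone model of what g2h()/h2g_valid() compute (illustrative only; the constants are made up):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static unsigned long guest_base = 0x10000;  /* picked by the loader */
    static unsigned long reserved_va;           /* 0 = no host reservation */
    #define VIRT_ADDR_SPACE_BITS 31             /* stand-in for the target macro */

    static void *g2h(uint32_t guest_addr)       /* guest -> host pointer */
    {
        return (void *)((unsigned long)guest_addr + guest_base);
    }

    static bool h2g_valid(void *host_ptr)       /* does host map back to guest? */
    {
        unsigned long g = (unsigned long)host_ptr - guest_base;
        return g < (1ul << VIRT_ADDR_SPACE_BITS) &&
               (!reserved_va || g < reserved_va);
    }

    int main(void)
    {
        void *host = g2h(0x400000);             /* -> host address 0x410000 */
        printf("host %p, valid=%d\n", host, h2g_valid(host));
        return 0;
    }
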
diff --git a/qemu/include/exec/cpu_ldst_template.h b/qemu/include/exec/cpu_ldst_template.h
index 95ab7504e..3091c0003 100644
--- a/qemu/include/exec/cpu_ldst_template.h
+++ b/qemu/include/exec/cpu_ldst_template.h
@@ -27,20 +27,24 @@
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
+#define SHIFT 3
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
+#define SHIFT 2
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define DATA_STYPE int16_t
+#define SHIFT 1
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define DATA_STYPE int8_t
+#define SHIFT 0
#else
#error unsupported data size
#endif
@@ -54,27 +58,36 @@
#ifdef SOFTMMU_CODE_ACCESS
#define ADDR_READ addr_code
#define MMUSUFFIX _cmmu
+#define URETSUFFIX SUFFIX
+#define SRETSUFFIX SUFFIX
#else
#define ADDR_READ addr_read
#define MMUSUFFIX _mmu
+#define URETSUFFIX USUFFIX
+#define SRETSUFFIX glue(s, SUFFIX)
#endif
/* generic load/store macros */
static inline RES_TYPE
-glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+ target_ulong ptr,
+ uintptr_t retaddr)
{
int page_index;
RES_TYPE res;
target_ulong addr;
int mmu_idx;
+ TCGMemOpIdx oi;
addr = ptr;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx);
+ oi = make_memop_idx(SHIFT, mmu_idx);
+ res = glue(glue(helper_ret_ld, URETSUFFIX), MMUSUFFIX)(env, addr,
+ oi, retaddr);
} else {
uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr);
@@ -82,27 +95,43 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
return res;
}
+static inline RES_TYPE
+glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ return glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
+}
+
#if DATA_SIZE <= 2
static inline int
-glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+ target_ulong ptr,
+ uintptr_t retaddr)
{
int res, page_index;
target_ulong addr;
int mmu_idx;
+ TCGMemOpIdx oi;
addr = ptr;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].ADDR_READ !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- res = (DATA_STYPE)glue(glue(helper_ld, SUFFIX),
- MMUSUFFIX)(env, addr, mmu_idx);
+ oi = make_memop_idx(SHIFT, mmu_idx);
+ res = (DATA_STYPE)glue(glue(helper_ret_ld, SRETSUFFIX),
+ MMUSUFFIX)(env, addr, oi, retaddr);
} else {
uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr);
}
return res;
}
+
+static inline int
+glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
+{
+ return glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(env, ptr, 0);
+}
#endif
#ifndef SOFTMMU_CODE_ACCESS
@@ -110,25 +139,36 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
/* generic store macro */
static inline void
-glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
- RES_TYPE v)
+glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+ target_ulong ptr,
+ RES_TYPE v, uintptr_t retaddr)
{
int page_index;
target_ulong addr;
int mmu_idx;
+ TCGMemOpIdx oi;
addr = ptr;
page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = CPU_MMU_INDEX;
if (unlikely(env->tlb_table[mmu_idx][page_index].addr_write !=
(addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) {
- glue(glue(helper_st, SUFFIX), MMUSUFFIX)(env, addr, v, mmu_idx);
+ oi = make_memop_idx(SHIFT, mmu_idx);
+ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, v, oi,
+ retaddr);
} else {
uintptr_t hostaddr = addr + env->tlb_table[mmu_idx][page_index].addend;
glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v);
}
}
+static inline void
+glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
+ RES_TYPE v)
+{
+ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(env, ptr, v, 0);
+}
+
#endif /* !SOFTMMU_CODE_ACCESS */
#undef RES_TYPE
@@ -139,3 +179,6 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
#undef DATA_SIZE
#undef MMUSUFFIX
#undef ADDR_READ
+#undef URETSUFFIX
+#undef SRETSUFFIX
+#undef SHIFT
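
Two pieces of plumbing make this template tick: token-pasting glue() macros that mint one accessor per size/suffix, and the new TCGMemOpIdx operand that packs the size shift and MMU index together. A compilable sketch of both (the oi encoding here is a simplified model, not the real one):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define xglue(a, b) a##b
    #define glue(a, b)  xglue(a, b)

    #define SUFFIX l                       /* as set for DATA_SIZE == 4 */
    #define SHIFT  2                       /* log2 of the access size */

    /* simplified model of make_memop_idx(): size shift high, mmu_idx low */
    static unsigned make_memop_idx_model(unsigned shift, unsigned mmu_idx)
    {
        return (shift << 4) | (mmu_idx & 15);
    }

    /* glue(glue(cpu_ld, SUFFIX), _data) expands to cpu_ldl_data */
    static uint32_t glue(glue(cpu_ld, SUFFIX), _data)(const uint8_t *hostaddr)
    {
        uint32_t v;
        memcpy(&v, hostaddr, sizeof(v));   /* what ldl_p does, minus bswap */
        return v;
    }

    int main(void)
    {
        uint8_t ram[4] = { 0x78, 0x56, 0x34, 0x12 };
        printf("cpu_ldl_data -> %#x\n", cpu_ldl_data(ram));  /* LE: 0x12345678 */
        printf("oi = %#x\n", make_memop_idx_model(SHIFT, 1));
        return 0;
    }
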
diff --git a/qemu/include/exec/cpu_ldst_useronly_template.h b/qemu/include/exec/cpu_ldst_useronly_template.h
index b3b865fae..040b14743 100644
--- a/qemu/include/exec/cpu_ldst_useronly_template.h
+++ b/qemu/include/exec/cpu_ldst_useronly_template.h
@@ -56,12 +56,28 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
}
+static inline RES_TYPE
+glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+ target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
+}
+
#if DATA_SIZE <= 2
static inline int
glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr)
{
return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
}
+
+static inline int
+glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+ target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
+}
#endif
#ifndef CODE_ACCESS
@@ -71,6 +87,15 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, target_ulong ptr,
{
glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
}
+
+static inline void
+glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
+ target_ulong ptr,
+ RES_TYPE v,
+ uintptr_t retaddr)
+{
+ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
+}
#endif
#undef RES_TYPE
diff --git a/qemu/include/exec/cputlb.h b/qemu/include/exec/cputlb.h
index 360815e1b..d454c005b 100644
--- a/qemu/include/exec/cputlb.h
+++ b/qemu/include/exec/cputlb.h
@@ -25,23 +25,7 @@ void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
uintptr_t length);
-void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length);
-void tlb_set_dirty(CPUArchState *env, target_ulong vaddr);
extern int tlb_flush_count;
-/* exec.c */
-void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
-
-MemoryRegionSection *
-address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
- hwaddr *plen);
-hwaddr memory_region_section_get_iotlb(CPUState *cpu,
- MemoryRegionSection *section,
- target_ulong vaddr,
- hwaddr paddr, hwaddr xlat,
- int prot,
- target_ulong *address);
-bool memory_region_is_unassigned(MemoryRegion *mr);
-
#endif
#endif
diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h
index a6fce04f6..736209505 100644
--- a/qemu/include/exec/exec-all.h
+++ b/qemu/include/exec/exec-all.h
@@ -62,26 +62,16 @@ typedef struct TranslationBlock TranslationBlock;
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
-/* Maximum size a TCG op can expand to. This is complicated because a
- single op may require several host instructions and register reloads.
- For now take a wild guess at 192 bytes, which should allow at least
- a couple of fixup instructions per argument. */
-#define TCG_MAX_OP_SIZE 192
-
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#include "qemu/log.h"
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
-void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
- int pc_pos);
+ target_ulong *data);
void cpu_gen_init(void);
-int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
- int *gen_code_size_ptr);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
-void page_size_init(void);
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
@@ -90,20 +80,113 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
+void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
#if !defined(CONFIG_USER_ONLY)
-bool qemu_in_vcpu_thread(void);
-void cpu_reload_memory_map(CPUState *cpu);
-void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
+void cpu_reloading_memory_map(void);
+/**
+ * cpu_address_space_init:
+ * @cpu: CPU to add this address space to
+ * @as: address space to add
+ * @asidx: integer index of this address space
+ *
+ * Add the specified address space to the CPU's cpu_ases list.
+ * The address space added with @asidx 0 is the one used for the
+ * convenience pointer cpu->as.
+ * The target-specific code which registers ASes is responsible
+ * for defining what semantics address space 0, 1, 2, etc have.
+ *
+ * Before the first call to this function, the caller must set
+ * cpu->num_ases to the total number of address spaces it needs
+ * to support.
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
+/**
+ * cpu_get_address_space:
+ * @cpu: CPU to get address space from
+ * @asidx: index identifying which address space to get
+ *
+ * Return the requested address space of this CPU. @asidx
+ * specifies which address space to read.
+ */
+AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
/* cputlb.c */
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ * @flush_global: ignored
+ *
+ * Flush the entire TLB for the specified CPU.
+ * The flush_global flag is in theory an indicator of whether the whole
+ * TLB should be flushed, or only those entries not marked global.
+ * In practice QEMU does not implement any global/not global flag for
+ * TLB entries, and the argument is ignored.
+ */
void tlb_flush(CPUState *cpu, int flush_global);
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
- hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, ...);
+/**
+ * tlb_set_page_with_attrs:
+ * @cpu: CPU to add this TLB entry for
+ * @vaddr: virtual address of page to add entry for
+ * @paddr: physical address of the page
+ * @attrs: memory transaction attributes
+ * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
+ * @mmu_idx: MMU index to insert TLB entry for
+ * @size: size of the page in bytes
+ *
+ * Add an entry to this CPU's TLB (a mapping from virtual address
+ * @vaddr to physical address @paddr) with the specified memory
+ * transaction attributes. This is generally called by the target CPU
+ * specific code after it has been called through the tlb_fill()
+ * entry point and performed a successful page table walk to find
+ * the physical address and attributes for the virtual address
+ * which provoked the TLB miss.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
+ * used by tlb_flush_page.
+ */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, MemTxAttrs attrs,
int prot, int mmu_idx, target_ulong size);
+/* tlb_set_page:
+ *
+ * This function is equivalent to calling tlb_set_page_with_attrs()
+ * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
+ * as a convenience for CPUs which don't use memory transaction attributes.
+ */
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, int prot,
+ int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
uintptr_t retaddr);
@@ -115,6 +198,15 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
+
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+ target_ulong addr, ...)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
@@ -122,13 +214,14 @@ static inline void tlb_flush(CPUState *cpu, int flush_global)
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
-/* estimated block size for TB allocation */
-/* XXX: use a per code average code fragment size and modulate it
- according to the host CPU */
+/* Estimated block size for TB allocation. */
+/* ??? The following is based on a 2015 survey of x86_64 host output.
+ Better would seem to be some sort of dynamically sized TB array,
+ adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 128
+#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
-#define CODE_GEN_AVG_BLOCK_SIZE 64
+#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif
#if defined(__arm__) || defined(_ARCH_PPC) \
@@ -151,10 +244,14 @@ struct TranslationBlock {
#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
#define CF_NOCACHE 0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT 0x20000
+#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
void *tc_ptr; /* pointer to the translated code */
+ uint8_t *tc_search; /* pointer to search data */
/* next matching tb for physical address. */
struct TranslationBlock *phys_hash_next;
+ /* original tb when cflags has CF_NOCACHE */
+ struct TranslationBlock *orig_tb;
/* first and second physical page containing code. The lower bit
of the pointer tells the index in page_next[] */
struct TranslationBlock *page_next[2];
@@ -176,7 +273,7 @@ struct TranslationBlock {
struct TranslationBlock *jmp_first;
};
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
typedef struct TBContext TBContext;
@@ -186,7 +283,7 @@ struct TBContext {
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
- spinlock_t tb_lock;
+ QemuMutex tb_lock;
/* statistics */
int tb_flush_count;
@@ -282,6 +379,11 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
{
/* NOTE: this test is only needed for thread safety */
if (!tb->jmp_next[n]) {
+ qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
+ "Linking TBs %p [" TARGET_FMT_lx
+ "] index %d -> %p [" TARGET_FMT_lx "]\n",
+ tb->tc_ptr, tb->pc, n,
+ tb_next->tc_ptr, tb_next->pc);
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
@@ -308,20 +410,14 @@ extern uintptr_t tci_tb_ptr;
to indicate the compressed mode; subtracting two works around that. It
is also the case that there are no host isas that contain a call insn
smaller than 4 bytes, so we don't worry about special-casing this. */
-#if defined(CONFIG_TCG_INTERPRETER)
-# define GETPC_ADJ 0
-#else
-# define GETPC_ADJ 2
-#endif
+#define GETPC_ADJ 2
#define GETPC() (GETRA() - GETPC_ADJ)
#if !defined(CONFIG_USER_ONLY)
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
-
struct MemoryRegion *iotlb_to_region(CPUState *cpu,
- hwaddr index);
+ hwaddr index, MemTxAttrs attrs);
void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
@@ -329,43 +425,44 @@ void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
#endif
#if defined(CONFIG_USER_ONLY)
+void mmap_lock(void);
+void mmap_unlock(void);
+
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
return addr;
}
#else
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
+
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+
+/* exec.c */
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
+
+MemoryRegionSection *
+address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ hwaddr *xlat, hwaddr *plen);
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section,
+ target_ulong vaddr,
+ hwaddr paddr, hwaddr xlat,
+ int prot,
+ target_ulong *address);
+bool memory_region_is_unassigned(MemoryRegion *mr);
+
#endif
/* vl.c */
extern int singlestep;
-/* cpu-exec.c */
-extern volatile sig_atomic_t exit_request;
+/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
+extern CPUState *tcg_current_cpu;
+extern bool exit_request;
-/**
- * cpu_can_do_io:
- * @cpu: The CPU for which to check IO.
- *
- * Deterministic execution requires that IO only be performed on the last
- * instruction of a TB so that interrupts take effect immediately.
- *
- * Returns: %true if memory-mapped IO is safe, %false otherwise.
- */
-static inline bool cpu_can_do_io(CPUState *cpu)
-{
- if (!use_icount) {
- return true;
- }
- /* If not executing code then assume we are ok. */
- if (cpu->current_tb == NULL) {
- return true;
- }
- return cpu->can_do_io != 0;
-}
-
-#if !defined(CONFIG_USER_ONLY)
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
-#endif
#endif
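
The new tlb_flush_*_by_mmuidx() prototypes document an unusual calling convention: a varargs list of MMU indexes terminated by a negative value. A standalone sketch of how such a list is consumed (not the QEMU implementation):

    #include <stdarg.h>
    #include <stdio.h>

    static void flush_by_mmuidx(int first_idx, ...)
    {
        va_list ap;
        int idx = first_idx;

        va_start(ap, first_idx);
        while (idx >= 0) {             /* a negative value ends the list */
            printf("flushing mmu_idx %d\n", idx);
            idx = va_arg(ap, int);
        }
        va_end(ap);
    }

    int main(void)
    {
        flush_by_mmuidx(0, 2, -1);     /* flush indexes 0 and 2 */
        return 0;
    }
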
diff --git a/qemu/include/exec/gdbstub.h b/qemu/include/exec/gdbstub.h
index 05f57c243..d9e8cf771 100644
--- a/qemu/include/exec/gdbstub.h
+++ b/qemu/include/exec/gdbstub.h
@@ -14,7 +14,34 @@
typedef void (*gdb_syscall_complete_cb)(CPUState *cpu,
target_ulong ret, target_ulong err);
+/**
+ * gdb_do_syscall:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * ...: list of arguments to interpolate into @fmt
+ *
+ * Send a GDB syscall request. This function will return immediately;
+ * the callback function will be called later when the remote system
+ * call has completed.
+ *
+ * @fmt should be in the 'call-id,parameter,parameter...' format documented
+ * for the F request packet in the GDB remote protocol. A limited set of
+ * printf-style format specifiers is supported:
+ * %x - target_ulong argument printed in hex
+ * %lx - 64-bit argument printed in hex
+ * %s - string pointer (target_ulong) and length (int) pair
+ */
void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...);
+/**
+ * gdb_do_syscallv:
+ * @cb: function to call when the system call has completed
+ * @fmt: gdb syscall format string
+ * @va: arguments to interpolate into @fmt
+ *
+ * As gdb_do_syscall, but taking a va_list rather than a variable
+ * argument list.
+ */
+void gdb_do_syscallv(gdb_syscall_complete_cb cb, const char *fmt, va_list va);
int use_gdb_syscalls(void);
void gdb_set_stop_cpu(CPUState *cpu);
void gdb_exit(CPUArchState *, int);
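
gdb_do_syscallv() is the classic va_list twin of a variadic entry point: the ... version captures its arguments once and forwards them, so callers that already hold a va_list share the same path. A minimal standalone model of the pattern (vprintf stands in for packet construction):

    #include <stdarg.h>
    #include <stdio.h>

    static void do_syscallv(const char *fmt, va_list va)
    {
        vprintf(fmt, va);              /* stand-in for building the F packet */
    }

    static void do_syscall(const char *fmt, ...)
    {
        va_list va;

        va_start(va, fmt);
        do_syscallv(fmt, va);          /* forward, as the header pair does */
        va_end(va);
    }

    int main(void)
    {
        do_syscall("write,%x,%x,%x\n", 1u, 0x1000u, 16u);
        return 0;
    }
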
diff --git a/qemu/include/exec/helper-head.h b/qemu/include/exec/helper-head.h
index b009ccb11..ec790432d 100644
--- a/qemu/include/exec/helper-head.h
+++ b/qemu/include/exec/helper-head.h
@@ -18,7 +18,6 @@
#ifndef DEF_HELPER_H
#define DEF_HELPER_H 1
-#include "qemu/osdep.h"
#define HELPER(name) glue(helper_, name)
diff --git a/qemu/include/exec/log.h b/qemu/include/exec/log.h
new file mode 100644
index 000000000..ba1c9b568
--- /dev/null
+++ b/qemu/include/exec/log.h
@@ -0,0 +1,60 @@
+#ifndef QEMU_EXEC_LOG_H
+#define QEMU_EXEC_LOG_H
+
+#include "qemu/log.h"
+#include "qom/cpu.h"
+#include "disas/disas.h"
+
+/* cpu_dump_state() logging functions: */
+/**
+ * log_cpu_state:
+ * @cpu: The CPU whose state is to be logged.
+ * @flags: Flags what to log.
+ *
+ * Logs the output of cpu_dump_state().
+ */
+static inline void log_cpu_state(CPUState *cpu, int flags)
+{
+ if (qemu_log_enabled()) {
+ cpu_dump_state(cpu, qemu_logfile, fprintf, flags);
+ }
+}
+
+/**
+ * log_cpu_state_mask:
+ * @mask: Mask when to log.
+ * @cpu: The CPU whose state is to be logged.
+ * @flags: Flags what to log.
+ *
+ * Logs the output of cpu_dump_state() if loglevel includes @mask.
+ */
+static inline void log_cpu_state_mask(int mask, CPUState *cpu, int flags)
+{
+ if (qemu_loglevel & mask) {
+ log_cpu_state(cpu, flags);
+ }
+}
+
+#ifdef NEED_CPU_H
+/* disas() and target_disas() to qemu_logfile: */
+static inline void log_target_disas(CPUState *cpu, target_ulong start,
+ target_ulong len, int flags)
+{
+ target_disas(qemu_logfile, cpu, start, len, flags);
+}
+
+static inline void log_disas(void *code, unsigned long size)
+{
+ disas(qemu_logfile, code, size);
+}
+
+#if defined(CONFIG_USER_ONLY)
+/* page_dump() output to the log file: */
+static inline void log_page_dump(void)
+{
+ page_dump(qemu_logfile);
+}
+#endif
+#endif
+
+#endif
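
The helpers above all follow the same check-then-log shape: test the global level mask before paying for the dump. A scaled-down standalone model (the mask names are invented):

    #include <stdio.h>

    #define LOG_INT  (1 << 0)          /* stand-ins for the CPU_LOG_* masks */
    #define LOG_EXEC (1 << 1)

    static int loglevel = LOG_EXEC;    /* set from -d in real QEMU */

    static void log_state_mask(int mask, const char *what)
    {
        if (loglevel & mask) {         /* same shape as log_cpu_state_mask() */
            fprintf(stderr, "state dump: %s\n", what);
        }
    }

    int main(void)
    {
        log_state_mask(LOG_EXEC, "after tb exit");   /* printed */
        log_state_mask(LOG_INT, "interrupt entry");  /* suppressed */
        return 0;
    }
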
diff --git a/qemu/include/exec/memattrs.h b/qemu/include/exec/memattrs.h
index f8537a8d9..e60106184 100644
--- a/qemu/include/exec/memattrs.h
+++ b/qemu/include/exec/memattrs.h
@@ -35,8 +35,8 @@ typedef struct MemTxAttrs {
unsigned int secure:1;
/* Memory access is usermode (unprivileged) */
unsigned int user:1;
- /* Stream ID (for MSI for example) */
- unsigned int stream_id:16;
+ /* Requester ID (for MSI for example) */
+ unsigned int requester_id:16;
} MemTxAttrs;
/* Bus masters which don't specify any attributes will get this,
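
Because MemTxAttrs is a plain bitfield struct, renaming stream_id to requester_id keeps the field's width and position; existing initializers only needed the new name. A standalone model of how such attributes are built (field layout copied from the hunk, struct name invented):

    #include <stdio.h>

    typedef struct MemTxAttrsModel {
        unsigned int unspecified:1;
        unsigned int secure:1;
        unsigned int user:1;
        unsigned int requester_id:16;  /* was stream_id before this change */
    } MemTxAttrsModel;

    int main(void)
    {
        /* e.g. an MSI write tagged with a PCI requester ID */
        MemTxAttrsModel attrs = { .requester_id = 0x42 };
        printf("requester %#x, secure %u\n", attrs.requester_id, attrs.secure);
        return 0;
    }
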
diff --git a/qemu/include/exec/memory.h b/qemu/include/exec/memory.h
index 94d20eae0..e2a3e9953 100644
--- a/qemu/include/exec/memory.h
+++ b/qemu/include/exec/memory.h
@@ -21,8 +21,6 @@
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
-#include <stdint.h>
-#include <stdbool.h>
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
@@ -31,7 +29,6 @@
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
-#include "qapi/error.h"
#include "qom/object.h"
#include "qemu/rcu.h"
@@ -159,27 +156,33 @@ typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
struct MemoryRegion {
Object parent_obj;
+
/* All fields are private - violators will be prosecuted */
- const MemoryRegionOps *ops;
+
+ /* The following fields should fit in a cache line */
+ bool romd_mode;
+ bool ram;
+ bool subpage;
+ bool readonly; /* For RAM regions */
+ bool rom_device;
+ bool flush_coalesced_mmio;
+ bool global_locking;
+ uint8_t dirty_log_mask;
+ RAMBlock *ram_block;
+ Object *owner;
const MemoryRegionIOMMUOps *iommu_ops;
+
+ const MemoryRegionOps *ops;
void *opaque;
MemoryRegion *container;
Int128 size;
hwaddr addr;
void (*destructor)(MemoryRegion *mr);
- ram_addr_t ram_addr;
uint64_t align;
- bool subpage;
bool terminates;
- bool romd_mode;
- bool ram;
bool skip_dump;
- bool readonly; /* For RAM regions */
bool enabled;
- bool rom_device;
bool warning_printed; /* For reservations */
- bool flush_coalesced_mmio;
- bool global_locking;
uint8_t vga_logging_count;
MemoryRegion *alias;
hwaddr alias_offset;
@@ -189,7 +192,6 @@ struct MemoryRegion {
QTAILQ_ENTRY(MemoryRegion) subregions_link;
QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
const char *name;
- uint8_t dirty_log_mask;
unsigned ioeventfd_nb;
MemoryRegionIoeventfd *ioeventfds;
NotifierList iommu_notify;
@@ -236,6 +238,8 @@ struct AddressSpace {
struct rcu_head rcu;
char *name;
MemoryRegion *root;
+ int ref_count;
+ bool malloced;
/* Accessed via RCU. */
struct FlatView *current_map;
@@ -324,7 +328,7 @@ void memory_region_unref(MemoryRegion *mr);
* @owner: the object that tracks the region's reference count
* @ops: a structure containing read and write callbacks to be used when
* I/O is performed on the region.
- * @opaque: passed to to the read and write callbacks of the @ops structure.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
* @name: used for debugging; not visible to the user or ABI
* @size: size of the region.
*/
@@ -437,6 +441,9 @@ void memory_region_init_alias(MemoryRegion *mr,
* memory_region_init_rom_device: Initialize a ROM memory region. Writes are
* handled via callbacks.
*
+ * If NULL callbacks pointer is given, then I/O space is not supposed to be
+ * handled by QEMU itself. Any access via the memory API will cause an abort().
+ *
* @mr: the #MemoryRegion to be initialized.
* @owner: the object that tracks the region's reference count
* @ops: callbacks for write access handling.
@@ -459,16 +466,21 @@ void memory_region_init_rom_device(MemoryRegion *mr,
* A reservation region primarily serves debugging purposes. It claims I/O
* space that is not supposed to be handled by QEMU itself. Any access via
* the memory API will cause an abort().
+ * This function is deprecated. Use memory_region_init_io() with NULL
+ * callbacks instead.
*
* @mr: the #MemoryRegion to be initialized
* @owner: the object that tracks the region's reference count
* @name: used for debugging; not visible to the user or ABI
* @size: size of the region.
*/
-void memory_region_init_reservation(MemoryRegion *mr,
- struct Object *owner,
+static inline void memory_region_init_reservation(MemoryRegion *mr,
+ Object *owner,
const char *name,
- uint64_t size);
+ uint64_t size)
+{
+ memory_region_init_io(mr, owner, NULL, mr, name, size);
+}
/**
* memory_region_init_iommu: Initialize a memory region that translates
@@ -510,7 +522,10 @@ uint64_t memory_region_size(MemoryRegion *mr);
*
* @mr: the memory region being queried
*/
-bool memory_region_is_ram(MemoryRegion *mr);
+static inline bool memory_region_is_ram(MemoryRegion *mr)
+{
+ return mr->ram;
+}
/**
* memory_region_is_skip_dump: check whether a memory region should not be
@@ -550,7 +565,11 @@ static inline bool memory_region_is_romd(MemoryRegion *mr)
*
* @mr: the memory region being queried
*/
-bool memory_region_is_iommu(MemoryRegion *mr);
+static inline bool memory_region_is_iommu(MemoryRegion *mr)
+{
+ return mr->iommu_ops;
+}
+
/**
* memory_region_notify_iommu: notify a change in an IOMMU translation entry.
@@ -575,6 +594,19 @@ void memory_region_notify_iommu(MemoryRegion *mr,
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
/**
+ * memory_region_iommu_replay: replay existing IOMMU translations to
+ * a notifier
+ *
+ * @mr: the memory region to observe
+ * @n: the notifier to which to replay iommu mappings
+ * @granularity: Minimum page granularity to replay notifications for
+ * @is_write: Whether to treat the replay as a translate "write"
+ * through the iommu
+ */
+void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n,
+ hwaddr granularity, bool is_write);
+
+/**
* memory_region_unregister_iommu_notifier: unregister a notifier for
* changes to IOMMU translation entries.
*
@@ -619,7 +651,11 @@ uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
*
* @mr: the memory region being queried
*/
-bool memory_region_is_rom(MemoryRegion *mr);
+static inline bool memory_region_is_rom(MemoryRegion *mr)
+{
+ return mr->ram && mr->readonly;
+}
+
/**
* memory_region_get_fd: Get a file descriptor backing a RAM memory region.
@@ -635,8 +671,13 @@ int memory_region_get_fd(MemoryRegion *mr);
* memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
*
* Returns a host pointer to a RAM memory region (created with
- * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
- * care.
+ * memory_region_init_ram() or memory_region_init_ram_ptr()).
+ *
+ * Use with care; by the time this function returns, the returned pointer is
+ * not protected by RCU anymore. If the caller is not within an RCU critical
+ * section and does not hold the iothread lock, it must have other means of
+ * protecting the pointer, such as a reference to the region that includes
+ * the incoming ram_addr_t.
*
* @mr: the memory region being queried.
*/
@@ -935,9 +976,6 @@ void memory_region_add_subregion_overlap(MemoryRegion *mr,
/**
* memory_region_get_ram_addr: Get the ram address associated with a memory
* region
- *
- * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
- * code is being reworked.
*/
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
@@ -1138,12 +1176,28 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
* address_space_init: initializes an address space
*
* @as: an uninitialized #AddressSpace
- * @root: a #MemoryRegion that routes addesses for the address space
+ * @root: a #MemoryRegion that routes addresses for the address space
* @name: an address space name. The name is only used for debugging
* output.
*/
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
+/**
+ * address_space_init_shareable: return an address space for a memory region,
+ * creating it if it does not already exist
+ *
+ * @root: a #MemoryRegion that routes addresses for the address space
+ * @name: an address space name. The name is only used for debugging
+ * output.
+ *
+ * This function will return a pointer to an existing AddressSpace
+ * which was initialized with the specified MemoryRegion, or it will
+ * create and initialize one if it does not already exist. The ASes
+ * are reference-counted, so the memory will be freed automatically
+ * when the AddressSpace is destroyed via address_space_destroy.
+ */
+AddressSpace *address_space_init_shareable(MemoryRegion *root,
+ const char *name);
/**
* address_space_destroy: destroy an address space
@@ -1189,23 +1243,7 @@ MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
const uint8_t *buf, int len);
-/**
- * address_space_read: read from an address space.
- *
- * Return a MemTxResult indicating whether the operation succeeded
- * or failed (eg unassigned memory, device rejected the transaction,
- * IOMMU fault).
- *
- * @as: #AddressSpace to be accessed
- * @addr: address within that address space
- * @attrs: memory transaction attributes
- * @buf: buffer with the data transferred
- */
-MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
- uint8_t *buf, int len);
-
-/**
- * address_space_ld*: load from an address space
+/* address_space_ld*: load from an address space
* address_space_st*: store to an address space
*
* These functions perform a load or store of the byte, word,
@@ -1335,6 +1373,66 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
int is_write, hwaddr access_len);
+/* Internal functions, part of the implementation of address_space_read. */
+MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf,
+ int len, hwaddr addr1, hwaddr l,
+ MemoryRegion *mr);
+MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
+ MemTxAttrs attrs, uint8_t *buf, int len);
+void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
+
+static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
+{
+ if (is_write) {
+ return memory_region_is_ram(mr) && !mr->readonly;
+ } else {
+ return memory_region_is_ram(mr) || memory_region_is_romd(mr);
+ }
+}
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * Return a MemTxResult indicating whether the operation succeeded
+ * or failed (eg unassigned memory, device rejected the transaction,
+ * IOMMU fault).
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @attrs: memory transaction attributes
+ * @buf: buffer with the data transferred
+ */
+static inline __attribute__((__always_inline__))
+MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
+ uint8_t *buf, int len)
+{
+ MemTxResult result = MEMTX_OK;
+ hwaddr l, addr1;
+ void *ptr;
+ MemoryRegion *mr;
+
+ if (__builtin_constant_p(len)) {
+ if (len) {
+ rcu_read_lock();
+ l = len;
+ mr = address_space_translate(as, addr, &addr1, &l, false);
+ if (len == l && memory_access_is_direct(mr, false)) {
+ addr1 += memory_region_get_ram_addr(mr);
+ ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
+ memcpy(buf, ptr, len);
+ } else {
+ result = address_space_read_continue(as, addr, attrs, buf, len,
+ addr1, l, mr);
+ }
+ rcu_read_unlock();
+ }
+ } else {
+ result = address_space_read_full(as, addr, attrs, buf, len);
+ }
+ return result;
+}
+
#endif
#endif
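
The inlined address_space_read() relies on __builtin_constant_p(len), so fixed-size callers compile down to the direct-copy fast path while variable-size callers call straight into address_space_read_full(). A standalone sketch of the specialization trick (not the QEMU routines; path selection needs optimization enabled):

    #include <stdio.h>
    #include <string.h>

    static int read_full(char *buf, int len)      /* out-of-line slow path */
    {
        printf("slow path, len=%d\n", len);
        memset(buf, 0, len);
        return 0;
    }

    static inline int read_fast(char *buf, int len)
    {
        /* folded at compile time once this function is inlined */
        if (__builtin_constant_p(len) && len <= 8) {
            printf("fast path, len=%d\n", len);
            memset(buf, 0, len);
            return 0;
        }
        return read_full(buf, len);
    }

    int main(int argc, char **argv)
    {
        char buf[16];
        (void)argv;
        read_fast(buf, 4);             /* constant length: fast branch */
        read_fast(buf, argc + 4);      /* variable length: read_full */
        return 0;
    }
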
diff --git a/qemu/include/exec/ram_addr.h b/qemu/include/exec/ram_addr.h
index c113f2114..5adf7a4fc 100644
--- a/qemu/include/exec/ram_addr.h
+++ b/qemu/include/exec/ram_addr.h
@@ -22,22 +22,93 @@
#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
-ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
- bool share, const char *mem_path,
- Error **errp);
-ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
- MemoryRegion *mr, Error **errp);
-ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
-ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
- void (*resized)(const char*,
- uint64_t length,
- void *host),
- MemoryRegion *mr, Error **errp);
+struct RAMBlock {
+ struct rcu_head rcu;
+ struct MemoryRegion *mr;
+ uint8_t *host;
+ ram_addr_t offset;
+ ram_addr_t used_length;
+ ram_addr_t max_length;
+ void (*resized)(const char*, uint64_t length, void *host);
+ uint32_t flags;
+ /* Protected by iothread lock. */
+ char idstr[256];
+ /* RCU-enabled, writes protected by the ramlist lock */
+ QLIST_ENTRY(RAMBlock) next;
+ int fd;
+};
+
+static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
+{
+ return (b && b->host && offset < b->used_length) ? true : false;
+}
+
+static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
+{
+ assert(offset_in_ramblock(block, offset));
+ return (char *)block->host + offset;
+}
+
+/* The dirty memory bitmap is split into fixed-size blocks to allow growth
+ * under RCU. The bitmap for a block can be accessed as follows:
+ *
+ * rcu_read_lock();
+ *
+ * DirtyMemoryBlocks *blocks =
+ * atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
+ *
+ * ram_addr_t idx = (addr >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ * unsigned long *block = blocks->blocks[idx];
+ * ...access block bitmap...
+ *
+ * rcu_read_unlock();
+ *
+ * Remember to check for the end of the block when accessing a range of
+ * addresses. Move on to the next block if you reach the end.
+ *
+ * Organization into blocks allows dirty memory to grow (but not shrink) under
+ * RCU. When adding new RAMBlocks requires the dirty memory to grow, a new
+ * DirtyMemoryBlocks array is allocated with pointers to existing blocks kept
+ * the same. Other threads can safely access existing blocks while dirty
+ * memory is being grown. When no threads are using the old DirtyMemoryBlocks
+ * anymore it is freed by RCU (but the underlying blocks stay because they are
+ * pointed to from the new DirtyMemoryBlocks).
+ */
+#define DIRTY_MEMORY_BLOCK_SIZE ((ram_addr_t)256 * 1024 * 8)
+typedef struct {
+ struct rcu_head rcu;
+ unsigned long *blocks[];
+} DirtyMemoryBlocks;
+
+typedef struct RAMList {
+ QemuMutex mutex;
+ RAMBlock *mru_block;
+ /* RCU-enabled, writes protected by the ramlist lock. */
+ QLIST_HEAD(, RAMBlock) blocks;
+ DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
+ uint32_t version;
+} RAMList;
+extern RAMList ram_list;
+
+ram_addr_t last_ram_offset(void);
+void qemu_mutex_lock_ramlist(void);
+void qemu_mutex_unlock_ramlist(void);
+
+RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
+ bool share, const char *mem_path,
+ Error **errp);
+RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
+ MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
+RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
+ void (*resized)(const char*,
+ uint64_t length,
+ void *host),
+ MemoryRegion *mr, Error **errp);
int qemu_get_ram_fd(ram_addr_t addr);
+void qemu_set_ram_fd(ram_addr_t addr, int fd);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
-void *qemu_get_ram_ptr(ram_addr_t addr);
-void qemu_ram_free(ram_addr_t addr);
-void qemu_ram_free_from_ptr(ram_addr_t addr);
+void qemu_ram_free(RAMBlock *block);
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);
@@ -48,30 +119,82 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
- unsigned long end, page, next;
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = false;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- next = find_next_bit(ram_list.dirty_memory[client], end, page);
- return next < end;
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
+ if (found < num) {
+ dirty = true;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+
+ rcu_read_unlock();
+
+ return dirty;
}
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
- unsigned long end, page, next;
+ DirtyMemoryBlocks *blocks;
+ unsigned long end, page;
+ unsigned long idx, offset, base;
+ bool dirty = true;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);
- return next >= end;
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned long num = next - base;
+ unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
+ if (found < num) {
+ dirty = false;
+ break;
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
+ }
+
+ rcu_read_unlock();
+
+ return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
@@ -113,28 +236,73 @@ static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
unsigned client)
{
+ unsigned long page, idx, offset;
+ DirtyMemoryBlocks *blocks;
+
assert(client < DIRTY_MEMORY_NUM);
- set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+
+ page = addr >> TARGET_PAGE_BITS;
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+
+ rcu_read_lock();
+
+ blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+ set_bit_atomic(offset, blocks->blocks[idx]);
+
+ rcu_read_unlock();
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
+ DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
unsigned long end, page;
- unsigned long **d = ram_list.dirty_memory;
+ unsigned long idx, offset, base;
+ int i;
+
+ if (!mask && !xen_enabled()) {
+ return;
+ }
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
- if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
- }
- if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
+
+ rcu_read_lock();
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
}
- if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
- bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
+
+ idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+ base = page - offset;
+ while (page < end) {
+ unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+
+ if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+ offset, next - page);
+ }
+ if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+ bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+ offset, next - page);
+ }
+
+ page = next;
+ idx++;
+ offset = 0;
+ base += DIRTY_MEMORY_BLOCK_SIZE;
}
+
+ rcu_read_unlock();
+
xen_modified_memory(start, length);
}
@@ -154,21 +322,41 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
/* start address is aligned at the start of a word? */
if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
(hpratio == 1)) {
+ unsigned long **blocks[DIRTY_MEMORY_NUM];
+ unsigned long idx;
+ unsigned long offset;
long k;
long nr = BITS_TO_LONGS(pages);
+ idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
+ offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ rcu_read_lock();
+
+ for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+ blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+ }
+
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
- unsigned long **d = ram_list.dirty_memory;
- atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
- atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
if (tcg_enabled()) {
- atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
+ atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
}
}
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
+
+ rcu_read_unlock();
+
xen_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
@@ -220,18 +408,33 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
- unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+ unsigned long * const *src;
+ unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
+ DIRTY_MEMORY_BLOCK_SIZE);
+
+ rcu_read_lock();
+
+ src = atomic_rcu_read(
+ &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
- if (src[k]) {
- unsigned long bits = atomic_xchg(&src[k], 0);
+ if (src[idx][offset]) {
+ unsigned long bits = atomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
num_dirty += ctpopl(new_dirty);
}
+
+ if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+ offset = 0;
+ idx++;
+ }
}
+
+ rcu_read_unlock();
} else {
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
@@ -249,5 +452,6 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
return num_dirty;
}
+void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
#endif
#endif
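
All of the dirty-bitmap walkers above share the same index arithmetic: split the page number into a block index and bit offset, then clamp each step at the block boundary. A standalone model of the walk (DIRTY_MEMORY_BLOCK_SIZE scaled down so the demo crosses blocks):

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define BLOCK_PAGES 1024UL         /* scaled-down DIRTY_MEMORY_BLOCK_SIZE */

    int main(void)
    {
        unsigned long start = 0x3ff000UL, length = 0x802000UL;
        unsigned long page = start >> TARGET_PAGE_BITS;
        unsigned long end = (start + length) >> TARGET_PAGE_BITS;
        unsigned long idx = page / BLOCK_PAGES;
        unsigned long offset = page % BLOCK_PAGES;
        unsigned long base = page - offset;

        while (page < end) {
            /* never scan past the current block, exactly as the walkers do */
            unsigned long next =
                end < base + BLOCK_PAGES ? end : base + BLOCK_PAGES;
            printf("block %lu: bits [%lu, %lu)\n", idx, offset, next - base);
            page = next;
            idx++;
            offset = 0;
            base += BLOCK_PAGES;
        }
        return 0;
    }
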
diff --git a/qemu/include/exec/softmmu-semi.h b/qemu/include/exec/softmmu-semi.h
index 1819cc249..3a58c3f08 100644
--- a/qemu/include/exec/softmmu-semi.h
+++ b/qemu/include/exec/softmmu-semi.h
@@ -9,6 +9,14 @@
#ifndef SOFTMMU_SEMI_H
#define SOFTMMU_SEMI_H 1
+static inline uint64_t softmmu_tget64(CPUArchState *env, target_ulong addr)
+{
+ uint64_t val;
+
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 0);
+ return tswap64(val);
+}
+
static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
{
uint32_t val;
@@ -16,6 +24,7 @@ static inline uint32_t softmmu_tget32(CPUArchState *env, target_ulong addr)
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 0);
return tswap32(val);
}
+
static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
{
uint8_t val;
@@ -24,16 +33,25 @@ static inline uint32_t softmmu_tget8(CPUArchState *env, target_ulong addr)
return val;
}
+#define get_user_u64(arg, p) ({ arg = softmmu_tget64(env, p); 0; })
#define get_user_u32(arg, p) ({ arg = softmmu_tget32(env, p) ; 0; })
#define get_user_u8(arg, p) ({ arg = softmmu_tget8(env, p) ; 0; })
#define get_user_ual(arg, p) get_user_u32(arg, p)
+static inline void softmmu_tput64(CPUArchState *env,
+ target_ulong addr, uint64_t val)
+{
+ val = tswap64(val);
+ cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 1);
+}
+
static inline void softmmu_tput32(CPUArchState *env,
target_ulong addr, uint32_t val)
{
val = tswap32(val);
cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 1);
}
+#define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; })
#define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; })
#define put_user_ual(arg, p) put_user_u32(arg, p)
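
The get_user/put_user macros lean on GNU statement expressions: ({ ...; 0; }) performs the access as a side effect and still yields 0, so callers can keep the usual if (get_user_u64(...)) error-check shape. A standalone model (the fake memory access is invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_guest_mem = 0x1122334455667788ULL;

    static uint64_t tget64(uintptr_t addr)   /* stand-in for softmmu_tget64 */
    {
        (void)addr;                          /* a real one would translate */
        return fake_guest_mem;
    }

    #define get_user_u64(arg, p) ({ arg = tget64(p); 0; })

    int main(void)
    {
        uint64_t v;
        if (get_user_u64(v, 0x1000)) {       /* expands to (v = ..., 0) */
            return 1;                        /* never taken in this model */
        }
        printf("read %#llx\n", (unsigned long long)v);
        return 0;
    }
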
diff --git a/qemu/include/exec/spinlock.h b/qemu/include/exec/spinlock.h
deleted file mode 100644
index a72edda1d..000000000
--- a/qemu/include/exec/spinlock.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>
- */
-
-/* configure guarantees us that we have pthreads on any host except
- * mingw32, which doesn't support any of the user-only targets.
- * So we can simply assume we have pthread mutexes here.
- */
-#if defined(CONFIG_USER_ONLY)
-
-#include <pthread.h>
-#define spin_lock pthread_mutex_lock
-#define spin_unlock pthread_mutex_unlock
-#define spinlock_t pthread_mutex_t
-#define SPIN_LOCK_UNLOCKED PTHREAD_MUTEX_INITIALIZER
-
-#else
-
-/* Empty implementations, on the theory that system mode emulation
- * is single-threaded. This means that these functions should only
- * be used from code run in the TCG cpu thread, and cannot protect
- * data structures which might also be accessed from the IO thread
- * or from signal handlers.
- */
-typedef int spinlock_t;
-#define SPIN_LOCK_UNLOCKED 0
-
-static inline void spin_lock(spinlock_t *lock)
-{
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
-}
-
-#endif
diff --git a/qemu/include/exec/user/thunk.h b/qemu/include/exec/user/thunk.h
index 3b6746272..ad1d60266 100644
--- a/qemu/include/exec/user/thunk.h
+++ b/qemu/include/exec/user/thunk.h
@@ -19,7 +19,6 @@
#ifndef THUNK_H
#define THUNK_H
-#include <inttypes.h>
#include "cpu.h"
/* types enums definitions */