author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/include/exec/exec-all.h
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.

Collision happened in the following patches:

    migration: do cleanup operation after completion (738df5b9)
    Bug fix. (1750c932f86)
    kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by these patches was already in the upstreamed version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/include/exec/exec-all.h')
-rw-r--r--  qemu/include/exec/exec-all.h | 213
1 file changed, 155 insertions(+), 58 deletions(-)
diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h
index a6fce04f6..736209505 100644
--- a/qemu/include/exec/exec-all.h
+++ b/qemu/include/exec/exec-all.h
@@ -62,26 +62,16 @@ typedef struct TranslationBlock TranslationBlock;
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
-/* Maximum size a TCG op can expand to. This is complicated because a
- single op may require several host instructions and register reloads.
- For now take a wild guess at 192 bytes, which should allow at least
- a couple of fixup instructions per argument. */
-#define TCG_MAX_OP_SIZE 192
-
#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
#include "qemu/log.h"
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
-void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
- int pc_pos);
+ target_ulong *data);
void cpu_gen_init(void);
-int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
- int *gen_code_size_ptr);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
-void page_size_init(void);
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
@@ -90,20 +80,113 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
int cflags);
void cpu_exec_init(CPUState *cpu, Error **errp);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
+void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
#if !defined(CONFIG_USER_ONLY)
-bool qemu_in_vcpu_thread(void);
-void cpu_reload_memory_map(CPUState *cpu);
-void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
+void cpu_reloading_memory_map(void);
+/**
+ * cpu_address_space_init:
+ * @cpu: CPU to add this address space to
+ * @as: address space to add
+ * @asidx: integer index of this address space
+ *
+ * Add the specified address space to the CPU's cpu_ases list.
+ * The address space added with @asidx 0 is the one used for the
+ * convenience pointer cpu->as.
+ * The target-specific code which registers ASes is responsible
+ * for defining what semantics address space 0, 1, 2, etc have.
+ *
+ * Before the first call to this function, the caller must set
+ * cpu->num_ases to the total number of address spaces it needs
+ * to support.
+ *
+ * Note that with KVM only one address space is supported.
+ */
+void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
+/**
+ * cpu_get_address_space:
+ * @cpu: CPU to get address space from
+ * @asidx: index identifying which address space to get
+ *
+ * Return the requested address space of this CPU. @asidx
+ * specifies which address space to read.
+ */
+AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
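Taken together, the two declarations above bracket a target's address-space setup. The following is a minimal sketch of the documented calling convention, not code from this header; the realize context and the two AddressSpace arguments are illustrative.

/* Hedged sketch: cs->num_ases must be set before the first
 * cpu_address_space_init() call, and index 0 backs cpu->as.
 * "normal_as"/"secure_as" are hypothetical names. */
static void example_cpu_setup_ases(CPUState *cs, AddressSpace *normal_as,
                                   AddressSpace *secure_as)
{
    cs->num_ases = 2;
    cpu_address_space_init(cs, normal_as, 0);   /* becomes cpu->as */
    cpu_address_space_init(cs, secure_as, 1);

    /* Later, look an address space back up by its index. */
    AddressSpace *as = cpu_get_address_space(cs, 1);
    (void)as;
}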
/* cputlb.c */
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ * @flush_global: ignored
+ *
+ * Flush the entire TLB for the specified CPU.
+ * The flush_global flag is in theory an indicator of whether the whole
+ * TLB should be flushed, or only those entries not marked global.
+ * In practice QEMU does not implement any global/not global flag for
+ * TLB entries, and the argument is ignored.
+ */
void tlb_flush(CPUState *cpu, int flush_global);
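As a hedged usage sketch (the trigger condition is invented), a target might call these when a guest write invalidates translations:

/* Illustrative only: flush one page after a single-entry invalidate,
 * or the whole TLB when the translation base changes. The argument to
 * tlb_flush() is ignored, per the comment above. */
static void example_invalidate(CPUState *cs, target_ulong va, bool all)
{
    if (all) {
        tlb_flush(cs, 1);
    } else {
        tlb_flush_page(cs, va);   /* all MMU indexes for this page */
    }
}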
-void tlb_set_page(CPUState *cpu, target_ulong vaddr,
- hwaddr paddr, int prot,
- int mmu_idx, target_ulong size);
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @...: list of MMU indexes to flush, terminated by a negative value
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, ...);
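Both varargs variants expect the index list to end with a negative sentinel, as the comments above state. A short sketch with made-up index numbers:

/* Hedged sketch: flush only MMU indexes 0 and 1 for one page, then all
 * entries for index 2; each list is terminated by -1. */
static void example_flush_by_idx(CPUState *cs, target_ulong va)
{
    tlb_flush_page_by_mmuidx(cs, va, 0, 1, -1);
    tlb_flush_by_mmuidx(cs, 2, -1);
}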
+/**
+ * tlb_set_page_with_attrs:
+ * @cpu: CPU to add this TLB entry for
+ * @vaddr: virtual address of page to add entry for
+ * @paddr: physical address of the page
+ * @attrs: memory transaction attributes
+ * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
+ * @mmu_idx: MMU index to insert TLB entry for
+ * @size: size of the page in bytes
+ *
+ * Add an entry to this CPU's TLB (a mapping from virtual address
+ * @vaddr to physical address @paddr) with the specified memory
+ * transaction attributes. This is generally called by the target CPU
+ * specific code after it has been called through the tlb_fill()
+ * entry point and performed a successful page table walk to find
+ * the physical address and attributes for the virtual address
+ * which provoked the TLB miss.
+ *
+ * At most one entry for a given virtual address is permitted. Only a
+ * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
+ * used by tlb_flush_page.
+ */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, MemTxAttrs attrs,
int prot, int mmu_idx, target_ulong size);
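Per the comment, the usual caller is the target's tlb_fill() path after a successful walk. A hedged sketch follows; example_walk() is a hypothetical stand-in for the target-specific page table walker.

/* Hypothetical walker: fills in physical address, permissions and
 * transaction attributes for a virtual address. */
static void example_walk(CPUState *cs, target_ulong vaddr, int mmu_idx,
                         hwaddr *paddr, int *prot, MemTxAttrs *attrs);

/* Hedged sketch of the documented flow inside a tlb_fill() handler. */
static void example_fill(CPUState *cs, target_ulong vaddr, int mmu_idx)
{
    hwaddr paddr;
    int prot;
    MemTxAttrs attrs;

    example_walk(cs, vaddr, mmu_idx, &paddr, &prot, &attrs);

    /* Exactly one TARGET_PAGE_SIZE region is mapped per call. */
    tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK, attrs,
                            prot, mmu_idx, TARGET_PAGE_SIZE);
}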
+/* tlb_set_page:
+ *
+ * This function is equivalent to calling tlb_set_page_with_attrs()
+ * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
+ * as a convenience for CPUs which don't use memory transaction attributes.
+ */
+void tlb_set_page(CPUState *cpu, target_ulong vaddr,
+ hwaddr paddr, int prot,
+ int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
uintptr_t retaddr);
@@ -115,6 +198,15 @@ static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
+
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+ target_ulong addr, ...)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
+{
+}
#endif
#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */
@@ -122,13 +214,14 @@ static inline void tlb_flush(CPUState *cpu, int flush_global)
#define CODE_GEN_PHYS_HASH_BITS 15
#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
-/* estimated block size for TB allocation */
-/* XXX: use a per code average code fragment size and modulate it
- according to the host CPU */
+/* Estimated block size for TB allocation. */
+/* ??? The following is based on a 2015 survey of x86_64 host output.
+ Better would seem to be some sort of dynamically sized TB array,
+ adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
-#define CODE_GEN_AVG_BLOCK_SIZE 128
+#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
-#define CODE_GEN_AVG_BLOCK_SIZE 64
+#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif
#if defined(__arm__) || defined(_ARCH_PPC) \
@@ -151,10 +244,14 @@ struct TranslationBlock {
#define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */
#define CF_NOCACHE 0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT 0x20000
+#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
void *tc_ptr; /* pointer to the translated code */
+ uint8_t *tc_search; /* pointer to search data */
/* next matching tb for physical address. */
struct TranslationBlock *phys_hash_next;
+ /* original tb when cflags has CF_NOCACHE */
+ struct TranslationBlock *orig_tb;
/* first and second physical page containing code. The lower bit
of the pointer tells the index in page_next[] */
struct TranslationBlock *page_next[2];
@@ -176,7 +273,7 @@ struct TranslationBlock {
struct TranslationBlock *jmp_first;
};
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
typedef struct TBContext TBContext;
@@ -186,7 +283,7 @@ struct TBContext {
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
- spinlock_t tb_lock;
+ QemuMutex tb_lock;
/* statistics */
int tb_flush_count;
@@ -282,6 +379,11 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
{
/* NOTE: this test is only needed for thread safety */
if (!tb->jmp_next[n]) {
+ qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
+ "Linking TBs %p [" TARGET_FMT_lx
+ "] index %d -> %p [" TARGET_FMT_lx "]\n",
+ tb->tc_ptr, tb->pc, n,
+ tb_next->tc_ptr, tb_next->pc);
/* patch the native jump address */
tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
@@ -308,20 +410,14 @@ extern uintptr_t tci_tb_ptr;
to indicate the compressed mode; subtracting two works around that. It
is also the case that there are no host isas that contain a call insn
smaller than 4 bytes, so we don't worry about special-casing this. */
-#if defined(CONFIG_TCG_INTERPRETER)
-# define GETPC_ADJ 0
-#else
-# define GETPC_ADJ 2
-#endif
+#define GETPC_ADJ 2
#define GETPC() (GETRA() - GETPC_ADJ)
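GETPC() is only meaningful in a helper invoked directly from generated code. A hedged sketch (the helper and its alignment check are invented; cpu_loop_exit_restore() is declared earlier in this header):

/* Hypothetical helper: on a fault, pass the host return address so the
 * guest state can be unwound before leaving the CPU loop. ENV_GET_CPU()
 * is assumed to be the per-target env-to-CPUState macro of this era. */
void helper_example_check(CPUArchState *env, target_ulong addr)
{
    if (addr & 3) {
        cpu_loop_exit_restore(ENV_GET_CPU(env), GETPC());
    }
}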
#if !defined(CONFIG_USER_ONLY)
-void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
-
struct MemoryRegion *iotlb_to_region(CPUState *cpu,
- hwaddr index);
+ hwaddr index, MemTxAttrs attrs);
void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
@@ -329,43 +425,44 @@ void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
#endif
#if defined(CONFIG_USER_ONLY)
+void mmap_lock(void);
+void mmap_unlock(void);
+
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
return addr;
}
#else
+static inline void mmap_lock(void) {}
+static inline void mmap_unlock(void) {}
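In the user-only build the lock guards the guest's page mappings; the inline stubs above let shared call sites compile unchanged under softmmu. A hedged sketch of the pattern (tb_gen_code's middle parameters are elided in the hunk further up, so the signature assumed here is an assumption):

/* Illustrative only: hold mmap_lock across translation so the guest
 * memory layout cannot change mid-walk; a no-op under softmmu. */
static TranslationBlock *example_translate(CPUState *cs, target_ulong pc,
                                           target_ulong cs_base,
                                           uint64_t flags)
{
    TranslationBlock *tb;

    mmap_lock();
    tb = tb_gen_code(cs, pc, cs_base, flags, 0);
    mmap_unlock();
    return tb;
}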
+
/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
+
+void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
+void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);
+
+/* exec.c */
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
+
+MemoryRegionSection *
+address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
+ hwaddr *xlat, hwaddr *plen);
+hwaddr memory_region_section_get_iotlb(CPUState *cpu,
+ MemoryRegionSection *section,
+ target_ulong vaddr,
+ hwaddr paddr, hwaddr xlat,
+ int prot,
+ target_ulong *address);
+bool memory_region_is_unassigned(MemoryRegion *mr);
+
#endif
/* vl.c */
extern int singlestep;
-/* cpu-exec.c */
-extern volatile sig_atomic_t exit_request;
+/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
+extern CPUState *tcg_current_cpu;
+extern bool exit_request;
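The comment prescribes atomic_mb_read/atomic_mb_set for both variables; a hedged sketch of that discipline follows (the surrounding logic is invented, not the real cpu-exec.c loop):

/* Illustrative access pattern: publish the running CPU, then poll the
 * global exit flag with the required memory-barrier accessors. */
static void example_poll(CPUState *cs)
{
    atomic_mb_set(&tcg_current_cpu, cs);
    if (atomic_mb_read(&exit_request)) {
        cs->exit_request = 1;   /* hand off to the per-CPU flag */
    }
    atomic_mb_set(&tcg_current_cpu, NULL);
}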
-/**
- * cpu_can_do_io:
- * @cpu: The CPU for which to check IO.
- *
- * Deterministic execution requires that IO only be performed on the last
- * instruction of a TB so that interrupts take effect immediately.
- *
- * Returns: %true if memory-mapped IO is safe, %false otherwise.
- */
-static inline bool cpu_can_do_io(CPUState *cpu)
-{
- if (!use_icount) {
- return true;
- }
- /* If not executing code then assume we are ok. */
- if (cpu->current_tb == NULL) {
- return true;
- }
- return cpu->can_do_io != 0;
-}
-
-#if !defined(CONFIG_USER_ONLY)
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
-#endif
#endif