author     José Pekkarinen <jose.pekkarinen@nokia.com>   2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>   2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/tcg/aarch64
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.
Collission happened in the following patches: migration: do cleanup operation after completion(738df5b9) Bug fix.(1750c932f86) kvmclock: add a new function to update env->tsc.(b52baab2) The code provided by the patches was already in the upstreamed version. Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/tcg/aarch64')
-rw-r--r--  qemu/tcg/aarch64/tcg-target.h                                                      3
-rw-r--r--  qemu/tcg/aarch64/tcg-target.inc.c (renamed from qemu/tcg/aarch64/tcg-target.c)   108
2 files changed, 68 insertions, 43 deletions
diff --git a/qemu/tcg/aarch64/tcg-target.h b/qemu/tcg/aarch64/tcg-target.h
index 8aec04d2b..19a04a6e7 100644
--- a/qemu/tcg/aarch64/tcg-target.h
+++ b/qemu/tcg/aarch64/tcg-target.h
@@ -70,7 +70,8 @@ typedef enum {
#define TCG_TARGET_HAS_muls2_i32 0
#define TCG_TARGET_HAS_muluh_i32 0
#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_trunc_shr_i32 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
#define TCG_TARGET_HAS_div_i64 1
#define TCG_TARGET_HAS_rem_i64 1
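The two new macros replace TCG_TARGET_HAS_trunc_shr_i32: extrl_i64_i32 extracts the low 32 bits of a 64-bit value and extrh_i64_i32 the high 32 bits. Setting both to 0 tells the middle-end that this backend has no dedicated instructions for them, so they are expanded generically. A minimal C sketch of the semantics, for illustration only (not QEMU code):

    #include <stdint.h>

    /* Low half of a 64-bit value. */
    static uint32_t extrl_i64_i32(uint64_t x) { return (uint32_t)x; }

    /* High half, i.e. the value shifted right by 32 and truncated. */
    static uint32_t extrh_i64_i32(uint64_t x) { return (uint32_t)(x >> 32); }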
diff --git a/qemu/tcg/aarch64/tcg-target.c b/qemu/tcg/aarch64/tcg-target.inc.c
index b7ec4f5ac..a8fb4420d 100644
--- a/qemu/tcg/aarch64/tcg-target.c
+++ b/qemu/tcg/aarch64/tcg-target.inc.c
@@ -18,19 +18,19 @@
makes things much cleaner. */
QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);
-#ifndef NDEBUG
+#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
"%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7",
"%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15",
"%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23",
"%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp",
};
-#endif /* NDEBUG */
+#endif /* CONFIG_DEBUG_TCG */
static const int tcg_target_reg_alloc_order[] = {
TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23,
TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27,
- TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */
+ TCG_REG_X28, /* we will reserve this for guest_base if configured */
TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11,
TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15,
@@ -56,17 +56,18 @@ static const int tcg_target_call_oarg_regs[1] = {
#define TCG_REG_TMP TCG_REG_X30
#ifndef CONFIG_SOFTMMU
-# ifdef CONFIG_USE_GUEST_BASE
-# define TCG_REG_GUEST_BASE TCG_REG_X28
-# else
-# define TCG_REG_GUEST_BASE TCG_REG_XZR
-# endif
+/* Note that XZR cannot be encoded in the address base register slot,
   as that actually encodes SP. So if we need to zero-extend the guest
+ address, via the address index register slot, we need to load even
+ a zero guest base into a register. */
+#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32)
+#define TCG_REG_GUEST_BASE TCG_REG_X28
#endif
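The new comment above states the key constraint: encoding register 31 in the base-register slot of a load/store means SP, not XZR, so even a zero guest base must be kept in a real register whenever the guest address has to be zero-extended through the index-register slot. A small C sketch of the resulting condition, with guest_base and TARGET_LONG_BITS treated as plain variables purely for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    static bool use_guest_base(uint64_t guest_base, int target_long_bits)
    {
        /* A 32-bit guest address is zero-extended via the index register,
           which forces the (possibly zero) guest base into the base
           register (X28).  A non-zero base needs a register anyway. */
        return guest_base != 0 || target_long_bits == 32;
    }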
static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
ptrdiff_t offset = target - code_ptr;
- assert(offset == sextract64(offset, 0, 26));
+ tcg_debug_assert(offset == sextract64(offset, 0, 26));
/* read instruction, mask away previous PC_REL26 parameter contents,
set the proper offset, then write back the instruction. */
*code_ptr = deposit32(*code_ptr, 0, 26, offset);
@@ -75,14 +76,14 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
ptrdiff_t offset = target - code_ptr;
- assert(offset == sextract64(offset, 0, 19));
+ tcg_debug_assert(offset == sextract64(offset, 0, 19));
*code_ptr = deposit32(*code_ptr, 5, 19, offset);
}
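Both relocation helpers keep the same range check, only promoted to the debug-only tcg_debug_assert: an offset (counted in 32-bit instruction words) is representable iff sign-extending its low 26 or 19 bits reproduces the original value. A standalone sketch of that check, written without QEMU's sextract64 helper:

    #include <stdbool.h>
    #include <stdint.h>

    /* True iff 'offset' fits a signed field of 'bits' bits, e.g. the
       26-bit B/BL field (+/- 2^25 instruction words, i.e. +/- 128MB). */
    static bool fits_signed_bits(int64_t offset, int bits)
    {
        int64_t max =  ((int64_t)1 << (bits - 1)) - 1;
        int64_t min = -((int64_t)1 << (bits - 1));
        return offset >= min && offset <= max;
    }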
static inline void patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
- assert(addend == 0);
+ tcg_debug_assert(addend == 0);
switch (type) {
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
@@ -401,7 +402,7 @@ static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn,
insn |= pre << 24;
insn |= w << 23;
- assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
+ tcg_debug_assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0);
insn |= (ofs & (0x7f << 3)) << (15 - 3);
tcg_out32(s, insn | r2 << 10 | rn << 5 | r1);
@@ -411,9 +412,9 @@ static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext,
TCGReg rd, TCGReg rn, uint64_t aimm)
{
if (aimm > 0xfff) {
- assert((aimm & 0xfff) == 0);
+ tcg_debug_assert((aimm & 0xfff) == 0);
aimm >>= 12;
- assert(aimm <= 0xfff);
+ tcg_debug_assert(aimm <= 0xfff);
aimm |= 1 << 12; /* apply LSL 12 */
}
tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd);
@@ -443,7 +444,7 @@ static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext,
static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext,
TCGReg rd, uint16_t half, unsigned shift)
{
- assert((shift & ~0x30) == 0);
+ tcg_debug_assert((shift & ~0x30) == 0);
tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd);
}
@@ -537,7 +538,7 @@ static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext,
{
unsigned h, l, r, c;
- assert(is_limm(limm));
+ tcg_debug_assert(is_limm(limm));
h = clz64(limm);
l = ctz64(limm);
@@ -792,7 +793,7 @@ static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target)
{
ptrdiff_t offset = target - s->code_ptr;
- assert(offset == sextract64(offset, 0, 26));
+ tcg_debug_assert(offset == sextract64(offset, 0, 26));
tcg_out_insn(s, 3206, B, offset);
}
@@ -866,7 +867,7 @@ static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a,
offset = tcg_in32(s) >> 5;
} else {
offset = l->u.value_ptr - s->code_ptr;
- assert(offset == sextract64(offset, 0, 19));
+ tcg_debug_assert(offset == sextract64(offset, 0, 19));
}
if (need_cmp) {
@@ -989,7 +990,7 @@ static void * const qemu_st_helpers[16] = {
static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
{
ptrdiff_t offset = tcg_pcrel_diff(s, target);
- assert(offset == sextract64(offset, 0, 21));
+ tcg_debug_assert(offset == sextract64(offset, 0, 21));
tcg_out_insn(s, 3406, ADR, rd, offset);
}
@@ -1051,14 +1052,29 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
slow path for the failure case, which will be patched later when finalizing
the slow path. Generated code returns the host addend in X1,
clobbers X0,X2,X3,TMP. */
-static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp opc,
tcg_insn_unit **label_ptr, int mem_index,
bool is_read)
{
- TCGReg base = TCG_AREG0;
int tlb_offset = is_read ?
offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
+ int s_mask = (1 << (opc & MO_SIZE)) - 1;
+ TCGReg base = TCG_AREG0, x3;
+ uint64_t tlb_mask;
+
+ /* For aligned accesses, we check the first byte and include the alignment
+ bits within the address. For unaligned access, we check that we don't
+ cross pages using the address of the last byte of the access. */
+ if ((opc & MO_AMASK) == MO_ALIGN || s_mask == 0) {
+ tlb_mask = TARGET_PAGE_MASK | s_mask;
+ x3 = addr_reg;
+ } else {
+ tcg_out_insn(s, 3401, ADDI, TARGET_LONG_BITS == 64,
+ TCG_REG_X3, addr_reg, s_mask);
+ tlb_mask = TARGET_PAGE_MASK;
+ x3 = TCG_REG_X3;
+ }
/* Extract the TLB index from the address into X0.
X0<CPU_TLB_BITS:0> =
@@ -1066,11 +1082,9 @@ static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits,
tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg,
TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS);
- /* Store the page mask part of the address and the low s_bits into X3.
- Later this allows checking for equality and alignment at the same time.
- X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */
- tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3,
- addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
+ /* Store the page mask part of the address into X3. */
+ tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64,
+ TCG_REG_X3, x3, tlb_mask);
/* Add any "high bits" from the tlb offset to the env address into X2,
to take advantage of the LSL12 form of the ADDI instruction.
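The rewritten TLB probe now folds the access size into the comparison differently depending on alignment, as the new comment describes. A hedged C sketch of the comparator value computed here, assuming 4KiB target pages (TARGET_PAGE_MASK == ~0xfffull) purely for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_MASK (~(uint64_t)0xfff)   /* assumed 4KiB pages */

    static uint64_t tlb_compare_value(uint64_t addr, unsigned size, bool aligned)
    {
        uint64_t s_mask = size - 1;            /* size is 1, 2, 4 or 8 */
        if (aligned || s_mask == 0) {
            /* Check the first byte and include the alignment bits, so a
               misaligned address fails the compare and takes the slow path. */
            return addr & (PAGE_MASK | s_mask);
        }
        /* Unaligned: check against the page of the access's last byte, so a
           page-crossing access also falls through to the slow path. */
        return (addr + s_mask) & PAGE_MASK;
    }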
@@ -1207,18 +1221,21 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
- TCGMemOp s_bits = memop & MO_SIZE;
tcg_insn_unit *label_ptr;
- tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
TCG_REG_X1, otype, addr_reg);
add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
- GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
- otype, addr_reg);
+ if (USE_GUEST_BASE) {
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
+ TCG_REG_GUEST_BASE, otype, addr_reg);
+ } else {
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
+ addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ }
#endif /* CONFIG_SOFTMMU */
}
@@ -1229,18 +1246,21 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
#ifdef CONFIG_SOFTMMU
unsigned mem_index = get_mmuidx(oi);
- TCGMemOp s_bits = memop & MO_SIZE;
tcg_insn_unit *label_ptr;
- tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
tcg_out_qemu_st_direct(s, memop, data_reg,
TCG_REG_X1, otype, addr_reg);
- add_qemu_ldst_label(s, false, oi, s_bits == MO_64, data_reg, addr_reg,
- s->code_ptr, label_ptr);
+ add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE) == MO_64,
+ data_reg, addr_reg, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
- tcg_out_qemu_st_direct(s, memop, data_reg,
- GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR,
- otype, addr_reg);
+ if (USE_GUEST_BASE) {
+ tcg_out_qemu_st_direct(s, memop, data_reg,
+ TCG_REG_GUEST_BASE, otype, addr_reg);
+ } else {
+ tcg_out_qemu_st_direct(s, memop, data_reg,
+ addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+ }
#endif /* CONFIG_SOFTMMU */
}
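In both the load and store paths the !CONFIG_SOFTMMU branch no longer picks between TCG_REG_GUEST_BASE and XZR for the base operand. When USE_GUEST_BASE is false the operands are swapped instead: the guest address goes into the base-register slot (where XZR cannot be encoded) and XZR becomes the 64-bit index, which yields the same effective address without requiring a zeroed base register.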
@@ -1274,7 +1294,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
#ifndef USE_DIRECT_JUMP
#error "USE_DIRECT_JUMP required for aarch64"
#endif
- assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
+ tcg_debug_assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
s->tb_jmp_offset[a0] = tcg_current_code_size(s);
/* actual branch destination will be patched by
aarch64_tb_set_jmp_target later, beware retranslation. */
@@ -1556,6 +1576,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext16s_i32:
tcg_out_sxt(s, ext, MO_16, a0, a1);
break;
+ case INDEX_op_ext_i32_i64:
case INDEX_op_ext32s_i64:
tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1);
break;
@@ -1567,6 +1588,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_ext16u_i32:
tcg_out_uxt(s, MO_16, a0, a1);
break;
+ case INDEX_op_extu_i32_i64:
case INDEX_op_ext32u_i64:
tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
break;
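The two new canonical widening opcodes reuse the existing 32-to-64 paths: ext_i32_i64 shares the SXTW emission with ext32s_i64, while extu_i32_i64 shares the plain 32-bit register move with ext32u_i64 (any write to a W register already zeroes the upper 32 bits on AArch64). A tiny C model of the intended semantics, for illustration only:

    #include <stdint.h>

    static int64_t  ext_i32_i64 (int32_t x)  { return (int64_t)x;  }  /* sign-extend */
    static uint64_t extu_i32_i64(uint32_t x) { return (uint64_t)x; }  /* zero-extend */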
@@ -1712,6 +1734,8 @@ static const TCGTargetOpDef aarch64_op_defs[] = {
{ INDEX_op_ext8u_i64, { "r", "r" } },
{ INDEX_op_ext16u_i64, { "r", "r" } },
{ INDEX_op_ext32u_i64, { "r", "r" } },
+ { INDEX_op_ext_i32_i64, { "r", "r" } },
+ { INDEX_op_extu_i32_i64, { "r", "r" } },
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
{ INDEX_op_deposit_i64, { "r", "0", "rZ" } },
@@ -1794,9 +1818,9 @@ static void tcg_target_qemu_prologue(TCGContext *s)
tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
CPU_TEMP_BUF_NLONGS * sizeof(long));
-#if defined(CONFIG_USE_GUEST_BASE)
- if (GUEST_BASE) {
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE);
+#if !defined(CONFIG_SOFTMMU)
+ if (USE_GUEST_BASE) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
}
#endif
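The prologue hunk completes the move from the old compile-time CONFIG_USE_GUEST_BASE/GUEST_BASE pair to the runtime guest_base variable: whenever USE_GUEST_BASE holds, the base is materialized into X28 once at prologue time and X28 is removed from the allocatable register set, so translated code can rely on it staying live.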