author     José Pekkarinen <jose.pekkarinen@nokia.com>    2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>    2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/target-arm/cpu.h
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.
Collisions happened in the following patches:

    migration: do cleanup operation after completion (738df5b9)
    Bug fix. (1750c932f86)
    kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by the patches was already in the upstreamed version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/target-arm/cpu.h')
-rw-r--r--   qemu/target-arm/cpu.h   392
1 file changed, 326 insertions, 66 deletions
diff --git a/qemu/target-arm/cpu.h b/qemu/target-arm/cpu.h
index 7e89152bd..066ff678d 100644
--- a/qemu/target-arm/cpu.h
+++ b/qemu/target-arm/cpu.h
@@ -19,17 +19,14 @@
#ifndef CPU_ARM_H
#define CPU_ARM_H
-#include "config.h"
#include "kvm-consts.h"
#if defined(TARGET_AARCH64)
/* AArch64 definitions */
# define TARGET_LONG_BITS 64
-# define ELF_MACHINE EM_AARCH64
#else
# define TARGET_LONG_BITS 32
-# define ELF_MACHINE EM_ARM
#endif
#define TARGET_IS_BIENDIAN 1
@@ -56,6 +53,7 @@
#define EXCP_SMC 13 /* Secure Monitor Call */
#define EXCP_VIRQ 14
#define EXCP_VFIQ 15
+#define EXCP_SEMIHOST 16 /* semihosting call (A64 only) */
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -96,6 +94,7 @@
struct arm_boot_info;
#define NB_MMU_MODES 7
+#define TARGET_INSN_START_EXTRA_WORDS 1
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -113,7 +112,9 @@ typedef struct ARMGenericTimer {
#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
-#define NUM_GTIMERS 2
+#define GTIMER_HYP 2
+#define GTIMER_SEC 3
+#define NUM_GTIMERS 4
typedef struct {
uint64_t raw_tcr;
@@ -170,7 +171,7 @@ typedef struct CPUARMState {
uint32_t GE; /* cpsr[19:16] */
uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
- uint64_t daif; /* exception masks, in the bits they are in in PSTATE */
+ uint64_t daif; /* exception masks, in the bits they are in PSTATE */
uint64_t elr_el[4]; /* AArch64 exception link regs */
uint64_t sp_el[4]; /* AArch64 banked stack pointers */
@@ -219,10 +220,12 @@ typedef struct CPUARMState {
};
uint64_t ttbr1_el[4];
};
+ uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
/* MMU translation table base control. */
TCR tcr_el[4];
- uint32_t c2_data; /* MPU data cachable bits. */
- uint32_t c2_insn; /* MPU instruction cachable bits. */
+ TCR vtcr_el2; /* Virtualization Translation Control. */
+ uint32_t c2_data; /* MPU data cacheable bits. */
+ uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
* MPU write buffer control.
*/
@@ -275,6 +278,7 @@ typedef struct CPUARMState {
};
uint64_t far_el[4];
};
+ uint64_t hpfar_el2;
union { /* Translation result. */
struct {
uint64_t _unused_par_0;
@@ -358,6 +362,8 @@ typedef struct CPUARMState {
};
uint64_t c14_cntfrq; /* Counter Frequency register */
uint64_t c14_cntkctl; /* Timer Control register */
+ uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
+ uint64_t cntvoff_el2; /* Counter Virtual Offset register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
uint32_t c15_cpar; /* XScale Coprocessor Access Register */
uint32_t c15_ticonfig; /* TI925T configuration byte. */
@@ -373,11 +379,16 @@ typedef struct CPUARMState {
uint64_t dbgwvr[16]; /* watchpoint value registers */
uint64_t dbgwcr[16]; /* watchpoint control registers */
uint64_t mdscr_el1;
+ uint64_t oslsr_el1; /* OS Lock Status */
+ uint64_t mdcr_el2;
+ uint64_t mdcr_el3;
/* If the counter is enabled, this stores the last time the counter
* was reset. Otherwise it stores the counter value
*/
uint64_t c15_ccnt;
uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
+ uint64_t vpidr_el2; /* Virtualization Processor ID Register */
+ uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
} cp15;
struct {
@@ -467,9 +478,6 @@ typedef struct CPUARMState {
uint32_t cregs[16];
} iwmmxt;
- /* For mixed endian mode. */
- bool bswap_code;
-
#if defined(CONFIG_USER_ONLY)
/* For usermode syscall translation. */
int eabi;
@@ -500,7 +508,7 @@ typedef struct CPUARMState {
ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUState *cpu);
-uint32_t do_arm_semihosting(CPUARMState *env);
+target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -583,6 +591,22 @@ void pmccntr_sync(CPUARMState *env);
#define CPTR_TTA (1U << 20)
#define CPTR_TFP (1U << 10)
+#define MDCR_EPMAD (1U << 21)
+#define MDCR_EDAD (1U << 20)
+#define MDCR_SPME (1U << 17)
+#define MDCR_SDD (1U << 16)
+#define MDCR_SPD (3U << 14)
+#define MDCR_TDRA (1U << 11)
+#define MDCR_TDOSA (1U << 10)
+#define MDCR_TDA (1U << 9)
+#define MDCR_TDE (1U << 8)
+#define MDCR_HPME (1U << 7)
+#define MDCR_TPM (1U << 6)
+#define MDCR_TPMCR (1U << 5)
+
+/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
+#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)
+
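A minimal sketch of how this mask might be applied on a 32-bit write to SDCR (hypothetical write hook, modelled on the CPWriteFn shape used for cp15 registers; not taken from this patch):

    static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
    {
        /* Keep only the MDCR_EL3 bits that have an AArch32 SDCR view. */
        env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
    }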
#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
@@ -695,8 +719,17 @@ static inline void pstate_write(CPUARMState *env, uint32_t val)
/* Return the current CPSR value. */
uint32_t cpsr_read(CPUARMState *env);
-/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
-void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask);
+
+typedef enum CPSRWriteType {
+ CPSRWriteByInstr = 0, /* from guest MSR or CPS */
+ CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
+ CPSRWriteRaw = 2, /* trust values, do not switch reg banks */
+ CPSRWriteByGDBStub = 3, /* from the GDB stub */
+} CPSRWriteType;
+
+/* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type);
/* Return the current xPSR value. */
static inline uint32_t xpsr_read(CPUARMState *env)
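For illustration, a caller emulating a guest MSR could now state the origin of the write explicitly (a minimal sketch; the masks are arbitrary and CPSR_NZCV is assumed to be the usual flag-bit mask defined in this header):

    /* Guest MSR to the flag bits: normal banked-register switching rules. */
    cpsr_write(env, val, CPSR_NZCV, CPSRWriteByInstr);

    /* GDB stub rewriting the whole CPSR: same helper, different origin. */
    cpsr_write(env, val, 0xffffffff, CPSRWriteByGDBStub);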
@@ -920,7 +953,7 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env)
if (arm_feature(env, ARM_FEATURE_EL3)) {
return !(env->cp15.scr_el3 & SCR_NS);
} else {
- /* If EL2 is not supported then the secure state is implementation
+ /* If EL3 is not supported then the secure state is implementation
* defined, in which case QEMU defaults to non-secure.
*/
return false;
@@ -958,18 +991,33 @@ static inline bool arm_is_secure(CPUARMState *env)
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
- /* We don't currently support EL2, and this isn't valid for EL0
- * (if we're in EL0, is_a64() is what you want, and if we're not in EL0
- * then the state of EL0 isn't well defined.)
+ /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+ * and if we're not in EL0 then the state of EL0 isn't well defined.)
*/
- assert(el == 1 || el == 3);
+ assert(el >= 1 && el <= 3);
+ bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
- /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This
- * is a QEMU-imposed simplification which we may wish to change later.
- * If we in future support EL2 and/or EL3, then the state of lower
- * exception levels is controlled by the HCR.RW and SCR.RW bits.
+ /* The highest exception level is always at the maximum supported
+ * register width, and then lower levels have a register width controlled
+ * by bits in the SCR or HCR registers.
*/
- return arm_feature(env, ARM_FEATURE_AARCH64);
+ if (el == 3) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
+ }
+
+ if (el == 2) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
+ aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+ }
+
+ return aa64;
}
/* Function for determining whether guest cp register reads and writes should
@@ -1008,11 +1056,11 @@ static inline bool access_secure_reg(CPUARMState *env)
*/
#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
A32_BANKED_REG_GET((_env), _regname, \
- ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))))
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
A32_BANKED_REG_SET((_env), _regname, \
- ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))), \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
(_val))
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
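A small usage sketch (the ttbr0 field name is an assumption here; any cp15 field with banked Secure/Non-secure storage works the same way):

    /* Read the copy of TTBR0 belonging to the current security state,
     * honouring the AArch64-EL3 special case described above.
     */
    uint64_t ttbr0 = A32_BANKED_CURRENT_REG_GET(env, ttbr0);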
@@ -1228,6 +1276,18 @@ static inline bool cptype_valid(int cptype)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)
+/* Return the highest implemented Exception Level */
+static inline int arm_highest_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return 3;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ return 2;
+ }
+ return 1;
+}
+
/* Return the current Exception Level (as per ARMv8; note that this differs
* from the ARMv7 Privilege Level).
*/
@@ -1280,6 +1340,14 @@ typedef enum CPAccessResult {
/* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
CP_ACCESS_TRAP_EL2 = 3,
CP_ACCESS_TRAP_EL3 = 4,
+ /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
+ /* Access fails and results in an exception syndrome for an FP access,
+ * trapped directly to EL2 or EL3
+ */
+ CP_ACCESS_TRAP_FP_EL2 = 7,
+ CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;
/* Access functions for coprocessor registers. These cannot fail and
@@ -1289,7 +1357,9 @@ typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
uint64_t value);
/* Access permission check functions for coprocessor registers. */
-typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque);
+typedef CPAccessResult CPAccessFn(CPUARMState *env,
+ const ARMCPRegInfo *opaque,
+ bool isread);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
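A sketch of what an access-check hook can express with the new isread argument (hypothetical function; the trap choice is purely illustrative):

    static CPAccessResult example_access(CPUARMState *env,
                                         const ARMCPRegInfo *ri,
                                         bool isread)
    {
        /* Allow reads from any EL, but trap writes made from EL0. */
        if (!isread && arm_current_el(env) == 0) {
            return CP_ACCESS_TRAP;
        }
        return CP_ACCESS_OK;
    }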
@@ -1445,6 +1515,9 @@ static inline bool cp_access_ok(int current_el,
return (ri->access >> ((current_el * 2) + isread)) & 1;
}
+/* Raw read of a coprocessor register (as needed for migration, etc) */
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
+
/**
* write_list_to_cpustate
* @cpu: ARMCPU
@@ -1477,7 +1550,7 @@ bool write_list_to_cpustate(ARMCPU *cpu);
*/
bool write_cpustate_to_list(ARMCPU *cpu);
-/* Does the core conform to the the "MicroController" profile. e.g. Cortex-M3.
+/* Does the core conform to the "MicroController" profile. e.g. Cortex-M3.
Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are
conventional cores (ie. Application or Realtime profile). */
@@ -1509,8 +1582,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
CPUARMState *env = cs->env_ptr;
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
- uint32_t scr;
- uint32_t hcr;
bool pstate_unmasked;
int8_t unmasked = 0;
@@ -1524,31 +1595,10 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
switch (excp_idx) {
case EXCP_FIQ:
- /* If FIQs are routed to EL3 or EL2 then there are cases where we
- * override the CPSR.F in determining if the exception is masked or
- * not. If neither of these are set then we fall back to the CPSR.F
- * setting otherwise we further assess the state below.
- */
- hcr = (env->cp15.hcr_el2 & HCR_FMO);
- scr = (env->cp15.scr_el3 & SCR_FIQ);
-
- /* When EL3 is 32-bit, the SCR.FW bit controls whether the CPSR.F bit
- * masks FIQ interrupts when taken in non-secure state. If SCR.FW is
- * set then FIQs can be masked by CPSR.F when non-secure but only
- * when FIQs are only routed to EL3.
- */
- scr &= !((env->cp15.scr_el3 & SCR_FW) && !hcr);
pstate_unmasked = !(env->daif & PSTATE_F);
break;
case EXCP_IRQ:
- /* When EL3 execution state is 32-bit, if HCR.IMO is set then we may
- * override the CPSR.I masking when in non-secure state. The SCR.IRQ
- * setting has already been taken into consideration when setting the
- * target EL, so it does not have a further affect here.
- */
- hcr = (env->cp15.hcr_el2 & HCR_IMO);
- scr = false;
pstate_unmasked = !(env->daif & PSTATE_I);
break;
@@ -1573,8 +1623,58 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* interrupt.
*/
if ((target_el > cur_el) && (target_el != 1)) {
- if (arm_el_is_aa64(env, 3) || ((scr || hcr) && (!secure))) {
- unmasked = 1;
+ /* Exceptions targeting a higher EL may not be maskable */
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 64-bit masking rules are simple: exceptions to EL3
+ * can't be masked, and exceptions to EL2 can only be
+ * masked from Secure state. The HCR and SCR settings
+ * don't affect the masking logic, only the interrupt routing.
+ */
+ if (target_el == 3 || !secure) {
+ unmasked = 1;
+ }
+ } else {
+ /* The old 32-bit-only environment has a more complicated
+ * masking setup. HCR and SCR bits not only affect interrupt
+ * routing but also change the behaviour of masking.
+ */
+ bool hcr, scr;
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ /* If FIQs are routed to EL3 or EL2 then there are cases where
+ * we override the CPSR.F in determining if the exception is
+ * masked or not. If neither of these are set then we fall back
+ * to the CPSR.F setting otherwise we further assess the state
+ * below.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+ /* When EL3 is 32-bit, the SCR.FW bit controls whether the
+ * CPSR.F bit masks FIQ interrupts when taken in non-secure
+ * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+ * when non-secure but only when FIQs are only routed to EL3.
+ */
+ scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+ break;
+ case EXCP_IRQ:
+ /* When EL3 execution state is 32-bit, if HCR.IMO is set then
+ * we may override the CPSR.I masking when in non-secure state.
+ * The SCR.IRQ setting has already been taken into consideration
+ * when setting the target EL, so it does not have a further
+ * effect here.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ scr = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((scr || hcr) && !secure) {
+ unmasked = 1;
+ }
}
}
@@ -1587,7 +1687,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))
#define cpu_exec cpu_arm_exec
-#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
@@ -1667,7 +1766,7 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
}
/* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
@@ -1677,16 +1776,43 @@ static inline int cpu_mmu_index(CPUARMState *env)
return el;
}
-/* Return the Exception Level targeted by debug exceptions;
- * currently always EL1 since we don't implement EL2 or EL3.
- */
+/* Indexes used when registering address spaces with cpu_address_space_init */
+typedef enum ARMASIdx {
+ ARMASIdx_NS = 0,
+ ARMASIdx_S = 1,
+} ARMASIdx;
+
+/* Return the Exception Level targeted by debug exceptions. */
static inline int arm_debug_target_el(CPUARMState *env)
{
- return 1;
+ bool secure = arm_is_secure(env);
+ bool route_to_el2 = false;
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
+ route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
+ env->cp15.mdcr_el2 & (1 << 8);
+ }
+
+ if (route_to_el2) {
+ return 2;
+ } else if (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) && secure) {
+ return 3;
+ } else {
+ return 1;
+ }
}
static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
+ if (arm_is_secure(env)) {
+ /* MDCR_EL3.SDD disables debug events from Secure state */
+ if (extract32(env->cp15.mdcr_el3, 16, 1) != 0
+ || arm_current_el(env) == 3) {
+ return false;
+ }
+ }
+
if (arm_current_el(env) == arm_debug_target_el(env)) {
if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
|| (env->daif & PSTATE_D)) {
@@ -1698,10 +1824,42 @@ static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
- if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) {
+ int el = arm_current_el(env);
+
+ if (el == 0 && arm_el_is_aa64(env, 1)) {
return aa64_generate_debug_exceptions(env);
}
- return arm_current_el(env) != 2;
+
+ if (arm_is_secure(env)) {
+ int spd;
+
+ if (el == 0 && (env->cp15.sder & 1)) {
+ /* SDER.SUIDEN means debug exceptions from Secure EL0
+ * are always enabled. Otherwise they are controlled by
+ * SDCR.SPD like those from other Secure ELs.
+ */
+ return true;
+ }
+
+ spd = extract32(env->cp15.mdcr_el3, 14, 2);
+ switch (spd) {
+ case 1:
+ /* SPD == 0b01 is reserved, but behaves as 0b00. */
+ case 0:
+ /* For 0b00 we return true if external secure invasive debug
+ * is enabled. On real hardware this is controlled by external
+ * signals to the core. QEMU always permits debug, and behaves
+ * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
+ */
+ return true;
+ case 2:
+ return false;
+ case 3:
+ return true;
+ }
+ }
+
+ return el != 2;
}
/* Return true if debugging exceptions are currently enabled.
@@ -1737,6 +1895,53 @@ static inline bool arm_singlestep_active(CPUARMState *env)
&& arm_generate_debug_exceptions(env);
}
+static inline bool arm_sctlr_b(CPUARMState *env)
+{
+ return
+ /* We need not implement SCTLR.ITD in user-mode emulation, so
+ * let linux-user ignore the fact that it conflicts with SCTLR_B.
+ * This lets people run BE32 binaries with "-cpu any".
+ */
+#ifndef CONFIG_USER_ONLY
+ !arm_feature(env, ARM_FEATURE_V7) &&
+#endif
+ (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+ int cur_el;
+
+ /* In 32-bit mode, endianness is determined by looking at CPSR's E bit */
+ if (!is_a64(env)) {
+ return
+#ifdef CONFIG_USER_ONLY
+ /* In system mode, BE32 is modelled in line with the
+ * architecture (as word-invariant big-endianness), where loads
+ * and stores are done little endian but from addresses which
+ * are adjusted by XORing with the appropriate constant. So the
+ * endianness to use for the raw data access is not affected by
+ * SCTLR.B.
+ * In user mode, however, we model BE32 as byte-invariant
+ * big-endianness (because user-only code cannot tell the
+ * difference), and so we need to use a data access endianness
+ * that depends on SCTLR.B.
+ */
+ arm_sctlr_b(env) ||
+#endif
+ ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
+ }
+
+ cur_el = arm_current_el(env);
+
+ if (cur_el == 0) {
+ return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
+ }
+
+ return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
+}
+
#include "exec/cpu-all.h"
/* Bit usage in the TB flags field: bit 31 indicates whether we are
@@ -1767,8 +1972,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
-#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
-#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
+#define ARM_TBFLAG_SCTLR_B_SHIFT 16
+#define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
* checks on the other bits at runtime
*/
@@ -1780,6 +1985,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
*/
#define ARM_TBFLAG_NS_SHIFT 19
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
+#define ARM_TBFLAG_BE_DATA_SHIFT 20
+#define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
/* Bit usage when in AArch64 state: currently we have no A64 specific bits */
@@ -1804,12 +2011,34 @@ static inline bool arm_singlestep_active(CPUARMState *env)
(((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
(((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
-#define ARM_TBFLAG_BSWAP_CODE(F) \
- (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
+#define ARM_TBFLAG_SCTLR_B(F) \
+ (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
(((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_NS(F) \
(((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
+#define ARM_TBFLAG_BE_DATA(F) \
+ (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
+
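For illustration, the translator can pull the new bits back out of tb->flags roughly as follows (dc is a hypothetical DisasContext with matching fields; MO_BE/MO_LE are the TCG memory-op endianness flags):

    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;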
+static inline bool bswap_code(bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
+ * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
+ * would also end up as a mixed-endian mode with BE code, LE data.
+ */
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ sctlr_b;
+#else
+ /* All code access in ARM is little endian, and there are no loaders
+ * doing swaps that need to be reversed
+ */
+ return 0;
+#endif
+}
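Working the XOR through by hand for the user-mode case gives the following sketch:

    /*  TARGET_WORDS_BIGENDIAN   sctlr_b   bswap_code()
     *            0                 0       false  (plain little-endian)
     *            0                 1       true   (the invalid BE-code/LE-data mix noted above)
     *            1                 0       true   (BE8: code must be byte-swapped)
     *            1                 1       false  (BE32: code already matches the target order)
     */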
/* Return the exception level to which FP-disabled exceptions should
* be taken, or 0 if FP is enabled.
@@ -1876,6 +2105,17 @@ static inline int fp_exception_el(CPUARMState *env)
return 0;
}
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
@@ -1888,7 +2128,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
| (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
| (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
| (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
- | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
+ | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
if (!(access_secure_reg(env))) {
*flags |= ARM_TBFLAG_NS_MASK;
}
@@ -1900,7 +2140,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
@@ -1920,6 +2160,9 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}
}
+ if (arm_cpu_data_is_big_endian(env)) {
+ *flags |= ARM_TBFLAG_BE_DATA_MASK;
+ }
*flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
*cs_base = 0;
@@ -1933,4 +2176,21 @@ enum {
QEMU_PSCI_CONDUIT_HVC = 2,
};
+#ifndef CONFIG_USER_ONLY
+/* Return the address space index to use for a memory access */
+static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
+}
+
+/* Return the AddressSpace to use for a memory access
+ * (which depends on whether the access is S or NS, and whether
+ * the board gave us a separate AddressSpace for S accesses).
+ */
+static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
+}
+#endif
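A usage sketch (assuming the generic address_space_ldl() accessor with its usual signature; cs, paddr and attrs are placeholders):

    /* System-mode physical read routed to the Secure or Non-secure
     * address space according to the transaction attributes.
     */
    MemTxResult res;
    AddressSpace *as = arm_addressspace(cs, attrs);
    uint32_t data = address_space_ldl(as, paddr, attrs, &res);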
+
#endif