author     José Pekkarinen <jose.pekkarinen@nokia.com>  2016-05-18 13:18:31 +0300
committer  José Pekkarinen <jose.pekkarinen@nokia.com>  2016-05-18 13:42:15 +0300
commit     437fd90c0250dee670290f9b714253671a990160 (patch)
tree       b871786c360704244a07411c69fb58da9ead4a06 /qemu/target-arm
parent     5bbd6fe9b8bab2a93e548c5a53b032d1939eec05 (diff)
These changes are the raw update to qemu-2.6.

Collisions happened in the following patches:

    migration: do cleanup operation after completion (738df5b9)
    Bug fix. (1750c932f86)
    kvmclock: add a new function to update env->tsc. (b52baab2)

The code provided by the patches was already in the upstreamed version.

Change-Id: I3cc11841a6a76ae20887b2e245710199e1ea7f9a
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'qemu/target-arm')
-rw-r--r--  qemu/target-arm/Makefile.objs    |    3
-rw-r--r--  qemu/target-arm/arch_dump.c      |  337
-rw-r--r--  qemu/target-arm/arm-semi.c       |  180
-rw-r--r--  qemu/target-arm/arm_ldst.h       |    8
-rw-r--r--  qemu/target-arm/cpu-qom.h        |   31
-rw-r--r--  qemu/target-arm/cpu.c            |  136
-rw-r--r--  qemu/target-arm/cpu.h            |  392
-rw-r--r--  qemu/target-arm/cpu64.c          |   15
-rw-r--r--  qemu/target-arm/crypto_helper.c  |    2
-rw-r--r--  qemu/target-arm/gdbstub.c        |    4
-rw-r--r--  qemu/target-arm/gdbstub64.c      |    2
-rw-r--r--  qemu/target-arm/helper-a64.c     |  105
-rw-r--r--  qemu/target-arm/helper.c         | 2587
-rw-r--r--  qemu/target-arm/helper.h         |    9
-rw-r--r--  qemu/target-arm/internals.h      |  123
-rw-r--r--  qemu/target-arm/iwmmxt_helper.c  |    3
-rw-r--r--  qemu/target-arm/kvm-consts.h     |    1
-rw-r--r--  qemu/target-arm/kvm-stub.c       |    1
-rw-r--r--  qemu/target-arm/kvm.c            |  109
-rw-r--r--  qemu/target-arm/kvm32.c          |   89
-rw-r--r--  qemu/target-arm/kvm64.c          |  474
-rw-r--r--  qemu/target-arm/kvm_arm.h        |   61
-rw-r--r--  qemu/target-arm/machine.c        |   23
-rw-r--r--  qemu/target-arm/monitor.c        |   84
-rw-r--r--  qemu/target-arm/neon_helper.c    |    3
-rw-r--r--  qemu/target-arm/op_helper.c      |  437
-rw-r--r--  qemu/target-arm/psci.c           |    1
-rw-r--r--  qemu/target-arm/translate-a64.c  |  599
-rw-r--r--  qemu/target-arm/translate.c      | 1240
-rw-r--r--  qemu/target-arm/translate.h      |   35
30 files changed, 5586 insertions, 1508 deletions
diff --git a/qemu/target-arm/Makefile.objs b/qemu/target-arm/Makefile.objs
index 9460b409a..82cbe6bba 100644
--- a/qemu/target-arm/Makefile.objs
+++ b/qemu/target-arm/Makefile.objs
@@ -1,5 +1,5 @@
obj-y += arm-semi.o
-obj-$(CONFIG_SOFTMMU) += machine.o
+obj-$(CONFIG_SOFTMMU) += machine.o psci.o arch_dump.o monitor.o
obj-$(CONFIG_KVM) += kvm.o
obj-$(call land,$(CONFIG_KVM),$(call lnot,$(TARGET_AARCH64))) += kvm32.o
obj-$(call land,$(CONFIG_KVM),$(TARGET_AARCH64)) += kvm64.o
@@ -7,6 +7,5 @@ obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-y += translate.o op_helper.o helper.o cpu.o
obj-y += neon_helper.o iwmmxt_helper.o
obj-y += gdbstub.o
-obj-$(CONFIG_SOFTMMU) += psci.o
obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
obj-y += crypto_helper.o
diff --git a/qemu/target-arm/arch_dump.c b/qemu/target-arm/arch_dump.c
new file mode 100644
index 000000000..1a9861f69
--- /dev/null
+++ b/qemu/target-arm/arch_dump.c
@@ -0,0 +1,337 @@
+/* Support for writing ELF notes for ARM architectures
+ *
+ * Copyright (C) 2015 Red Hat Inc.
+ *
+ * Author: Andrew Jones <drjones@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+
+/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
+struct aarch64_user_regs {
+ uint64_t regs[31];
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t pstate;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_regs) != 272);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct aarch64_elf_prstatus {
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct aarch64_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+ char pad3[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_elf_prstatus) != 392);
+
+/* struct user_fpsimd_state from arch/arm64/include/uapi/asm/ptrace.h
+ *
+ * While the vregs member of user_fpsimd_state is of type __uint128_t,
+ * QEMU uses an array of uint64_t, where the high half of the 128-bit
+ * value is always in the 2n+1'th index. Thus we also break the 128-
+ * bit values into two halves in this reproduction of user_fpsimd_state.
+ */
+struct aarch64_user_vfp_state {
+ uint64_t vregs[64];
+ uint32_t fpsr;
+ uint32_t fpcr;
+ char pad[8];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_vfp_state) != 528);
+
+struct aarch64_note {
+ Elf64_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ union {
+ struct aarch64_elf_prstatus prstatus;
+ struct aarch64_user_vfp_state vfp;
+ };
+} QEMU_PACKED;
+
+#define AARCH64_NOTE_HEADER_SIZE offsetof(struct aarch64_note, prstatus)
+#define AARCH64_PRSTATUS_NOTE_SIZE \
+ (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_elf_prstatus))
+#define AARCH64_PRFPREG_NOTE_SIZE \
+ (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_user_vfp_state))
+
+static void aarch64_note_init(struct aarch64_note *note, DumpState *s,
+ const char *name, Elf64_Word namesz,
+ Elf64_Word type, Elf64_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
+ CPUARMState *env, int cpuid,
+ DumpState *s)
+{
+ struct aarch64_note note;
+ int ret, i;
+
+ aarch64_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.vfp));
+
+ for (i = 0; i < 64; ++i) {
+ note.vfp.vregs[i] = cpu_to_dump64(s, float64_val(env->vfp.regs[i]));
+ }
+
+ if (s->dump_info.d_endian == ELFDATA2MSB) {
+ /* For AArch64 we must always swap the vfp.regs's 2n and 2n+1
+ * entries when generating BE notes, because even big endian
+ * hosts use 2n+1 for the high half.
+ */
+ for (i = 0; i < 32; ++i) {
+ uint64_t tmp = note.vfp.vregs[2*i];
+ note.vfp.vregs[2*i] = note.vfp.vregs[2*i+1];
+ note.vfp.vregs[2*i+1] = tmp;
+ }
+ }
+
+ note.vfp.fpsr = cpu_to_dump32(s, vfp_get_fpsr(env));
+ note.vfp.fpcr = cpu_to_dump32(s, vfp_get_fpcr(env));
+
+ ret = f(&note, AARCH64_PRFPREG_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct aarch64_note note;
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ DumpState *s = opaque;
+ uint64_t pstate, sp;
+ int ret, i;
+
+ aarch64_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ pstate = cpsr_read(env);
+ sp = 0;
+ } else {
+ pstate = pstate_read(env);
+ sp = env->xregs[31];
+ }
+
+ for (i = 0; i < 31; ++i) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
+ }
+ note.prstatus.pr_reg.sp = cpu_to_dump64(s, sp);
+ note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
+ note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate);
+
+ ret = f(&note, AARCH64_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return aarch64_write_elf64_prfpreg(f, env, cpuid, s);
+}
+
+/* struct pt_regs from arch/arm/include/asm/ptrace.h */
+struct arm_user_regs {
+ uint32_t regs[17];
+ char pad[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_user_regs) != 72);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct arm_elf_prstatus {
+ char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct arm_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+} QEMU_PACKED arm_elf_prstatus;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_elf_prstatus) != 148);
+
+/* struct user_vfp from arch/arm/include/asm/user.h */
+struct arm_user_vfp_state {
+ uint64_t vregs[32];
+ uint32_t fpscr;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_user_vfp_state) != 260);
+
+struct arm_note {
+ Elf32_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("LINUX"), 4) */
+ union {
+ struct arm_elf_prstatus prstatus;
+ struct arm_user_vfp_state vfp;
+ };
+} QEMU_PACKED;
+
+#define ARM_NOTE_HEADER_SIZE offsetof(struct arm_note, prstatus)
+#define ARM_PRSTATUS_NOTE_SIZE \
+ (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_elf_prstatus))
+#define ARM_VFP_NOTE_SIZE \
+ (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_user_vfp_state))
+
+static void arm_note_init(struct arm_note *note, DumpState *s,
+ const char *name, Elf32_Word namesz,
+ Elf32_Word type, Elf32_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int arm_write_elf32_vfp(WriteCoreDumpFunction f, CPUARMState *env,
+ int cpuid, DumpState *s)
+{
+ struct arm_note note;
+ int ret, i;
+
+ arm_note_init(&note, s, "LINUX", 6, NT_ARM_VFP, sizeof(note.vfp));
+
+ for (i = 0; i < 32; ++i) {
+ note.vfp.vregs[i] = cpu_to_dump64(s, float64_val(env->vfp.regs[i]));
+ }
+
+ note.vfp.fpscr = cpu_to_dump32(s, vfp_get_fpscr(env));
+
+ ret = f(&note, ARM_VFP_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct arm_note note;
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ DumpState *s = opaque;
+ int ret, i, fpvalid = !!arm_feature(env, ARM_FEATURE_VFP);
+
+ arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, fpvalid);
+
+ for (i = 0; i < 16; ++i) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]);
+ }
+ note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env));
+
+ ret = f(&note, ARM_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ } else if (fpvalid) {
+ return arm_write_elf32_vfp(f, env, cpuid, s);
+ }
+
+ return 0;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ ARMCPU *cpu = ARM_CPU(first_cpu);
+ CPUARMState *env = &cpu->env;
+ GuestPhysBlock *block;
+ hwaddr lowest_addr = ULLONG_MAX;
+
+ /* Take a best guess at the phys_base. If we get it wrong then crash
+ * will need '--machdep phys_offset=<phys-offset>' added to its command
+ * line, which isn't any worse than assuming we can use zero, but being
+ * wrong. This is the same algorithm the crash utility uses when
+ * attempting to guess as it loads non-dumpfile formatted files.
+ */
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_start < lowest_addr) {
+ lowest_addr = block->target_start;
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ info->d_machine = EM_AARCH64;
+ info->d_class = ELFCLASS64;
+ info->page_size = (1 << 16); /* aarch64 max pagesize */
+ if (lowest_addr != ULLONG_MAX) {
+ info->phys_base = lowest_addr;
+ }
+ } else {
+ info->d_machine = EM_ARM;
+ info->d_class = ELFCLASS32;
+ info->page_size = (1 << 12);
+ if (lowest_addr < UINT_MAX) {
+ info->phys_base = lowest_addr;
+ }
+ }
+
+ /* We assume the relevant endianness is that of EL1; this is right
+ * for kernels, but might give the wrong answer if you're trying to
+ * dump a hypervisor that happens to be running an opposite-endian
+ * kernel.
+ */
+ info->d_endian = (env->cp15.sctlr_el[1] & SCTLR_EE) != 0
+ ? ELFDATA2MSB : ELFDATA2LSB;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ ARMCPU *cpu = ARM_CPU(first_cpu);
+ CPUARMState *env = &cpu->env;
+ size_t note_size;
+
+ if (class == ELFCLASS64) {
+ note_size = AARCH64_PRSTATUS_NOTE_SIZE;
+ note_size += AARCH64_PRFPREG_NOTE_SIZE;
+ } else {
+ note_size = ARM_PRSTATUS_NOTE_SIZE;
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ note_size += ARM_VFP_NOTE_SIZE;
+ }
+ }
+
+ return note_size * nr_cpus;
+}
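
To make the 2n/2n+1 pairing described in the arch_dump.c comment above concrete, here is a minimal standalone C sketch (not part of the patch) of the half-swap that aarch64_write_elf64_prfpreg() performs when emitting big-endian notes:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* V0 as QEMU stores it: low half at index 2n, high half at 2n+1 */
        uint64_t vregs[64] = { 0x1111111111111111ULL,    /* V0 low  */
                               0x2222222222222222ULL };  /* V0 high */
        int big_endian_note = 1;
        int i;

        if (big_endian_note) {
            /* Swap each 2n/2n+1 pair so the note holds big-endian
             * 128-bit values, as the loop in the patch does.
             */
            for (i = 0; i < 32; i++) {
                uint64_t tmp = vregs[2 * i];
                vregs[2 * i] = vregs[2 * i + 1];
                vregs[2 * i + 1] = tmp;
            }
        }

        printf("V0 in note: %016" PRIx64 " %016" PRIx64 "\n",
               vregs[0], vregs[1]);
        return 0;
    }
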
diff --git a/qemu/target-arm/arm-semi.c b/qemu/target-arm/arm-semi.c
index a2a736956..8be0645eb 100644
--- a/qemu/target-arm/arm-semi.c
+++ b/qemu/target-arm/arm-semi.c
@@ -18,13 +18,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <time.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/semihost.h"
@@ -36,6 +30,7 @@
#include "qemu-common.h"
#include "exec/gdbstub.h"
#include "hw/arm/arm.h"
+#include "qemu/cutils.h"
#endif
#define TARGET_SYS_OPEN 0x01
@@ -58,6 +53,7 @@
#define TARGET_SYS_GET_CMDLINE 0x15
#define TARGET_SYS_HEAPINFO 0x16
#define TARGET_SYS_EXIT 0x18
+#define TARGET_SYS_SYNCCACHE 0x19
/* ADP_Stopped_ApplicationExit is used for exit(0),
* anything else is implemented as exit(1) */
@@ -134,6 +130,7 @@ static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
#ifdef CONFIG_USER_ONLY
TaskState *ts = cs->opaque;
#endif
+ target_ulong reg0 = is_a64(env) ? env->xregs[0] : env->regs[0];
if (ret == (target_ulong)-1) {
#ifdef CONFIG_USER_ONLY
@@ -141,22 +138,46 @@ static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
#else
syscall_err = err;
#endif
- env->regs[0] = ret;
+ reg0 = ret;
} else {
/* Fixup syscalls that use nonstandard return conventions. */
- switch (env->regs[0]) {
+ switch (reg0) {
case TARGET_SYS_WRITE:
case TARGET_SYS_READ:
- env->regs[0] = arm_semi_syscall_len - ret;
+ reg0 = arm_semi_syscall_len - ret;
break;
case TARGET_SYS_SEEK:
- env->regs[0] = 0;
+ reg0 = 0;
break;
default:
- env->regs[0] = ret;
+ reg0 = ret;
break;
}
}
+ if (is_a64(env)) {
+ env->xregs[0] = reg0;
+ } else {
+ env->regs[0] = reg0;
+ }
+}
+
+static target_ulong arm_flen_buf(ARMCPU *cpu)
+{
+ /* Return an address in target memory of 64 bytes where the remote
+ * gdb should write its stat struct. (The format of this structure
+ * is defined by GDB's remote protocol and is not target-specific.)
+ * We put this on the guest's stack just below SP.
+ */
+ CPUARMState *env = &cpu->env;
+ target_ulong sp;
+
+ if (is_a64(env)) {
+ sp = env->xregs[31];
+ } else {
+ sp = env->regs[13];
+ }
+
+ return sp - 64;
}
static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
@@ -166,8 +187,13 @@ static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
/* The size is always stored in big-endian order, extract
the value. We assume the size always fits in 32 bits. */
uint32_t size;
- cpu_memory_rw_debug(cs, env->regs[13]-64+32, (uint8_t *)&size, 4, 0);
- env->regs[0] = be32_to_cpu(size);
+ cpu_memory_rw_debug(cs, arm_flen_buf(cpu) + 32, (uint8_t *)&size, 4, 0);
+ size = be32_to_cpu(size);
+ if (is_a64(env)) {
+ env->xregs[0] = size;
+ } else {
+ env->regs[0] = size;
+ }
#ifdef CONFIG_USER_ONLY
((TaskState *)cs->opaque)->swi_errno = err;
#else
@@ -175,17 +201,46 @@ static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
#endif
}
+static target_ulong arm_gdb_syscall(ARMCPU *cpu, gdb_syscall_complete_cb cb,
+ const char *fmt, ...)
+{
+ va_list va;
+ CPUARMState *env = &cpu->env;
+
+ va_start(va, fmt);
+ gdb_do_syscallv(cb, fmt, va);
+ va_end(va);
+
+ /* FIXME: we are implicitly relying on the syscall completing
+ * before this point, which is not guaranteed. We should
+ * put in an explicit synchronization between this and
+ * the callback function.
+ */
+
+ return is_a64(env) ? env->xregs[0] : env->regs[0];
+}
+
/* Read the input value from the argument block; fail the semihosting
* call if the memory read fails.
*/
#define GET_ARG(n) do { \
- if (get_user_ual(arg ## n, args + (n) * 4)) { \
- return (uint32_t)-1; \
+ if (is_a64(env)) { \
+ if (get_user_u64(arg ## n, args + (n) * 8)) { \
+ return -1; \
+ } \
+ } else { \
+ if (get_user_u32(arg ## n, args + (n) * 4)) { \
+ return -1; \
+ } \
} \
} while (0)
-#define SET_ARG(n, val) put_user_ual(val, args + (n) * 4)
-uint32_t do_arm_semihosting(CPUARMState *env)
+#define SET_ARG(n, val) \
+ (is_a64(env) ? \
+ put_user_u64(val, args + (n) * 8) : \
+ put_user_u32(val, args + (n) * 4))
+
+target_ulong do_arm_semihosting(CPUARMState *env)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
@@ -201,8 +256,15 @@ uint32_t do_arm_semihosting(CPUARMState *env)
CPUARMState *ts = env;
#endif
- nr = env->regs[0];
- args = env->regs[1];
+ if (is_a64(env)) {
+ /* Note that the syscall number is in W0, not X0 */
+ nr = env->xregs[0] & 0xffffffffU;
+ args = env->xregs[1];
+ } else {
+ nr = env->regs[0];
+ args = env->regs[1];
+ }
+
switch (nr) {
case TARGET_SYS_OPEN:
GET_ARG(0);
@@ -223,9 +285,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
return result_fileno;
}
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "open,%s,%x,1a4", arg0,
- (int)arg2+1, gdb_open_modeflags[arg1]);
- ret = env->regs[0];
+ ret = arm_gdb_syscall(cpu, arm_semi_cb, "open,%s,%x,1a4", arg0,
+ (int)arg2+1, gdb_open_modeflags[arg1]);
} else {
ret = set_swi_errno(ts, open(s, open_modeflags[arg1], 0644));
}
@@ -234,8 +295,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
case TARGET_SYS_CLOSE:
GET_ARG(0);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "close,%x", arg0);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "close,%x", arg0);
} else {
return set_swi_errno(ts, close(arg0));
}
@@ -248,8 +308,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
return (uint32_t)-1;
/* Write to debug console. stderr is near enough. */
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "write,2,%x,1", args);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "write,2,%x,1", args);
} else {
return write(STDERR_FILENO, &c, 1);
}
@@ -260,8 +319,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
return (uint32_t)-1;
len = strlen(s);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "write,2,%x,%x\n", args, len);
- ret = env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "write,2,%x,%x",
+ args, len);
} else {
ret = write(STDERR_FILENO, s, len);
}
@@ -274,8 +333,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
len = arg2;
if (use_gdb_syscalls()) {
arm_semi_syscall_len = len;
- gdb_do_syscall(arm_semi_cb, "write,%x,%x,%x", arg0, arg1, len);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "write,%x,%x,%x",
+ arg0, arg1, len);
} else {
s = lock_user(VERIFY_READ, arg1, len, 1);
if (!s) {
@@ -295,8 +354,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
len = arg2;
if (use_gdb_syscalls()) {
arm_semi_syscall_len = len;
- gdb_do_syscall(arm_semi_cb, "read,%x,%x,%x", arg0, arg1, len);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "read,%x,%x,%x",
+ arg0, arg1, len);
} else {
s = lock_user(VERIFY_WRITE, arg1, len, 0);
if (!s) {
@@ -317,8 +376,7 @@ uint32_t do_arm_semihosting(CPUARMState *env)
case TARGET_SYS_ISTTY:
GET_ARG(0);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "isatty,%x", arg0);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "isatty,%x", arg0);
} else {
return isatty(arg0);
}
@@ -326,8 +384,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
GET_ARG(0);
GET_ARG(1);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "lseek,%x,%x,0", arg0, arg1);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "lseek,%x,%x,0",
+ arg0, arg1);
} else {
ret = set_swi_errno(ts, lseek(arg0, arg1, SEEK_SET));
if (ret == (uint32_t)-1)
@@ -337,9 +395,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
case TARGET_SYS_FLEN:
GET_ARG(0);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_flen_cb, "fstat,%x,%x",
- arg0, env->regs[13]-64);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_flen_cb, "fstat,%x,%x",
+ arg0, arm_flen_buf(cpu));
} else {
struct stat buf;
ret = set_swi_errno(ts, fstat(arg0, &buf));
@@ -354,8 +411,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
GET_ARG(0);
GET_ARG(1);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "unlink,%s", arg0, (int)arg1+1);
- ret = env->regs[0];
+ ret = arm_gdb_syscall(cpu, arm_semi_cb, "unlink,%s",
+ arg0, (int)arg1+1);
} else {
s = lock_user_string(arg0);
if (!s) {
@@ -372,9 +429,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
GET_ARG(2);
GET_ARG(3);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "rename,%s,%s",
- arg0, (int)arg1+1, arg2, (int)arg3+1);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "rename,%s,%s",
+ arg0, (int)arg1+1, arg2, (int)arg3+1);
} else {
char *s2;
s = lock_user_string(arg0);
@@ -398,8 +454,8 @@ uint32_t do_arm_semihosting(CPUARMState *env)
GET_ARG(0);
GET_ARG(1);
if (use_gdb_syscalls()) {
- gdb_do_syscall(arm_semi_cb, "system,%s", arg0, (int)arg1+1);
- return env->regs[0];
+ return arm_gdb_syscall(cpu, arm_semi_cb, "system,%s",
+ arg0, (int)arg1+1);
} else {
s = lock_user_string(arg0);
if (!s) {
@@ -558,11 +614,35 @@ uint32_t do_arm_semihosting(CPUARMState *env)
return 0;
}
case TARGET_SYS_EXIT:
- /* ARM specifies only Stopped_ApplicationExit as normal
- * exit, everything else is considered an error */
- ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
+ if (is_a64(env)) {
+ /* The A64 version of this call takes a parameter block,
+ * so the application-exit type can return a subcode which
+ * is the exit status code from the application.
+ */
+ GET_ARG(0);
+ GET_ARG(1);
+
+ if (arg0 == ADP_Stopped_ApplicationExit) {
+ ret = arg1;
+ } else {
+ ret = 1;
+ }
+ } else {
+ /* ARM specifies only Stopped_ApplicationExit as normal
+ * exit, everything else is considered an error */
+ ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
+ }
gdb_exit(env, ret);
exit(ret);
+ case TARGET_SYS_SYNCCACHE:
+ /* Clean the D-cache and invalidate the I-cache for the specified
+ * virtual address range. This is a nop for us since we don't
+ * implement caches. This is only present on A64.
+ */
+ if (is_a64(env)) {
+ return 0;
+ }
+ /* fall through -- invalid for A32/T32 */
default:
fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
cpu_dump_state(cs, stderr, fprintf, 0);
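
A minimal sketch (not part of the patch) of the argument-block layout that the reworked GET_ARG macro handles: AArch32 semihosting packs 32-bit fields at args + n*4, while A64 packs 64-bit fields at args + n*8. Guest memory is modelled here as a plain host buffer, and the asserts assume a little-endian host:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static uint64_t read_arg(const uint8_t *args, int n, int is_a64)
    {
        uint64_t v64;
        uint32_t v32;

        if (is_a64) {
            memcpy(&v64, args + n * 8, 8);  /* 64-bit slot, as get_user_u64 */
            return v64;
        }
        memcpy(&v32, args + n * 4, 4);      /* 32-bit slot, as get_user_u32 */
        return v32;
    }

    int main(void)
    {
        uint8_t block[16] = { 0x2a, 0, 0, 0, 0, 0, 0, 0,   /* A64 arg0 */
                              0x07, 0, 0, 0, 0, 0, 0, 0 }; /* A64 arg1 */

        assert(read_arg(block, 1, 1) == 0x07);  /* A64 reads args + 8 */
        assert(read_arg(block, 1, 0) == 0x00);  /* A32 reads args + 4 */
        return 0;
    }
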
diff --git a/qemu/target-arm/arm_ldst.h b/qemu/target-arm/arm_ldst.h
index b1ece0173..35c2c4391 100644
--- a/qemu/target-arm/arm_ldst.h
+++ b/qemu/target-arm/arm_ldst.h
@@ -25,10 +25,10 @@
/* Load an instruction and return it in the standard little-endian order */
static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
- bool do_swap)
+ bool sctlr_b)
{
uint32_t insn = cpu_ldl_code(env, addr);
- if (do_swap) {
+ if (bswap_code(sctlr_b)) {
return bswap32(insn);
}
return insn;
@@ -36,10 +36,10 @@ static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
/* Ditto, for a halfword (Thumb) instruction */
static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
- bool do_swap)
+ bool sctlr_b)
{
uint16_t insn = cpu_lduw_code(env, addr);
- if (do_swap) {
+ if (bswap_code(sctlr_b)) {
return bswap16(insn);
}
return insn;
diff --git a/qemu/target-arm/cpu-qom.h b/qemu/target-arm/cpu-qom.h
index 3cbc4a006..1061c08a1 100644
--- a/qemu/target-arm/cpu-qom.h
+++ b/qemu/target-arm/cpu-qom.h
@@ -87,6 +87,9 @@ typedef struct ARMCPU {
/* GPIO outputs for generic timer */
qemu_irq gt_timer_outputs[NUM_GTIMERS];
+ /* MemoryRegion to use for secure physical accesses */
+ MemoryRegion *secure_memory;
+
/* 'compatible' string for this CPU for Linux device trees */
const char *dtb_compatible;
@@ -145,11 +148,14 @@ typedef struct ARMCPU {
uint32_t id_pfr0;
uint32_t id_pfr1;
uint32_t id_dfr0;
+ uint32_t pmceid0;
+ uint32_t pmceid1;
uint32_t id_afr0;
uint32_t id_mmfr0;
uint32_t id_mmfr1;
uint32_t id_mmfr2;
uint32_t id_mmfr3;
+ uint32_t id_mmfr4;
uint32_t id_isar0;
uint32_t id_isar1;
uint32_t id_isar2;
@@ -216,20 +222,39 @@ bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
int flags);
-hwaddr arm_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+
/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
+void arm_gt_htimer_cb(void *opaque);
+void arm_gt_stimer_cb(void *opaque);
+
+#define ARM_AFF0_SHIFT 0
+#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
+#define ARM_AFF1_SHIFT 8
+#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT)
+#define ARM_AFF2_SHIFT 16
+#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT)
+#define ARM_AFF3_SHIFT 32
+#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT)
+
+#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK)
+#define ARM64_AFFINITY_MASK \
+ (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK)
#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-
-void aarch64_cpu_do_interrupt(CPUState *cs);
#endif
#endif
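
The new ARM_AFF*_SHIFT/MASK macros carve MPIDR-style affinity fields out of a 64-bit value. A standalone sketch (shift values copied from the header; not part of the patch) of composing mp_affinity the way arm_cpu_initfn() does in cpu.c below:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARM_AFF0_SHIFT 0
    #define ARM_AFF1_SHIFT 8
    #define ARM_AFF2_SHIFT 16
    #define ARM_AFF3_SHIFT 32

    int main(void)
    {
        /* cluster 1, core 3 -- mirrors the Aff1/Aff0 split in cpu.c */
        uint64_t aff3 = 0, aff2 = 0, aff1 = 1, aff0 = 3;
        uint64_t mp_affinity = (aff3 << ARM_AFF3_SHIFT)
                             | (aff2 << ARM_AFF2_SHIFT)
                             | (aff1 << ARM_AFF1_SHIFT)
                             | (aff0 << ARM_AFF0_SHIFT);

        printf("mp_affinity = 0x%" PRIx64 "\n", mp_affinity); /* 0x103 */
        return 0;
    }
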
diff --git a/qemu/target-arm/cpu.c b/qemu/target-arm/cpu.c
index 8b4323dd0..e48e83acb 100644
--- a/qemu/target-arm/cpu.c
+++ b/qemu/target-arm/cpu.c
@@ -18,6 +18,8 @@
* <http://www.gnu.org/licenses/gpl-2.0.html>
*/
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "cpu.h"
#include "internals.h"
#include "qemu-common.h"
@@ -79,6 +81,27 @@ static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
}
}
+static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
+{
+ /* Purely an assertion check: we've already done reset once,
+ * so now check that running the reset for the cpreg doesn't
+ * change its value. This traps bugs where two different cpregs
+ * both try to reset the same state field but to different values.
+ */
+ ARMCPRegInfo *ri = value;
+ ARMCPU *cpu = opaque;
+ uint64_t oldvalue, newvalue;
+
+ if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
+ return;
+ }
+
+ oldvalue = read_raw_cp_reg(&cpu->env, ri);
+ cp_reg_reset(key, value, opaque);
+ newvalue = read_raw_cp_reg(&cpu->env, ri);
+ assert(oldvalue == newvalue);
+}
+
/* CPUClass::reset() */
static void arm_cpu_reset(CPUState *s)
{
@@ -90,6 +113,8 @@ static void arm_cpu_reset(CPUState *s)
memset(env, 0, offsetof(CPUARMState, features));
g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
+ g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
+
env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
@@ -308,10 +333,7 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
switch (irq) {
case ARM_CPU_VIRQ:
case ARM_CPU_VFIQ:
- if (!arm_feature(env, ARM_FEATURE_EL2)) {
- hw_error("%s: Virtual interrupt line %d with no EL2 support\n",
- __func__, irq);
- }
+ assert(arm_feature(env, ARM_FEATURE_EL2));
/* fall through */
case ARM_CPU_IRQ:
case ARM_CPU_FIQ:
@@ -322,7 +344,7 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
}
break;
default:
- hw_error("arm_cpu_set_irq: Bad interrupt line %d\n", irq);
+ g_assert_not_reached();
}
}
@@ -341,33 +363,20 @@ static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
break;
default:
- hw_error("arm_cpu_kvm_set_irq: Bad interrupt line %d\n", irq);
+ g_assert_not_reached();
}
kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}
-static bool arm_cpu_is_big_endian(CPUState *cs)
+static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- int cur_el;
cpu_synchronize_state(cs);
-
- /* In 32bit guest endianness is determined by looking at CPSR's E bit */
- if (!is_a64(env)) {
- return (env->uncached_cpsr & CPSR_E) ? 1 : 0;
- }
-
- cur_el = arm_current_el(env);
-
- if (cur_el == 0) {
- return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
- }
-
- return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
+ return arm_cpu_data_is_big_endian(env);
}
#endif
@@ -406,7 +415,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
} else {
info->print_insn = print_insn_arm;
}
- if (env->bswap_code) {
+ if (bswap_code(arm_sctlr_b(env))) {
#ifdef TARGET_WORDS_BIGENDIAN
info->endian = BFD_ENDIAN_LITTLE;
#else
@@ -436,7 +445,7 @@ static void arm_cpu_initfn(Object *obj)
*/
Aff1 = cs->cpu_index / ARM_CPUS_PER_CLUSTER;
Aff0 = cs->cpu_index % ARM_CPUS_PER_CLUSTER;
- cpu->mp_affinity = (Aff1 << 8) | Aff0;
+ cpu->mp_affinity = (Aff1 << ARM_AFF1_SHIFT) | Aff0;
#ifndef CONFIG_USER_ONLY
/* Our inbound IRQ and FIQ lines */
@@ -453,6 +462,10 @@ static void arm_cpu_initfn(Object *obj)
arm_gt_ptimer_cb, cpu);
cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
arm_gt_vtimer_cb, cpu);
+ cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
+ arm_gt_htimer_cb, cpu);
+ cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
+ arm_gt_stimer_cb, cpu);
qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
ARRAY_SIZE(cpu->gt_timer_outputs));
#endif
@@ -518,6 +531,15 @@ static void arm_cpu_post_init(Object *obj)
*/
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
&error_abort);
+
+#ifndef CONFIG_USER_ONLY
+ object_property_add_link(obj, "secure-memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&cpu->secure_memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
+#endif
}
if (arm_feature(&cpu->env, ARM_FEATURE_MPU)) {
@@ -616,6 +638,15 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->id_aa64pfr0 &= ~0xf000;
}
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ /* Disable the hypervisor feature bits in the processor feature
+ * registers if we don't have EL2. These are id_pfr1[15:12] and
+ * id_aa64pfr0_el1[11:8].
+ */
+ cpu->id_aa64pfr0 &= ~0xf00;
+ cpu->id_pfr1 &= ~0xf000;
+ }
+
if (!cpu->has_mpu) {
unset_feature(env, ARM_FEATURE_MPU);
}
@@ -625,7 +656,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
uint32_t nr = cpu->pmsav7_dregion;
if (nr > 0xff) {
- error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32 "\n", nr);
+ error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
return;
}
@@ -641,6 +672,29 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
init_cpreg_list(cpu);
+#ifndef CONFIG_USER_ONLY
+ if (cpu->has_el3) {
+ cs->num_ases = 2;
+ } else {
+ cs->num_ases = 1;
+ }
+
+ if (cpu->has_el3) {
+ AddressSpace *as;
+
+ if (!cpu->secure_memory) {
+ cpu->secure_memory = cs->memory;
+ }
+ as = address_space_init_shareable(cpu->secure_memory,
+ "cpu-secure-memory");
+ cpu_address_space_init(cs, as, ARMASIdx_S);
+ }
+ cpu_address_space_init(cs,
+ address_space_init_shareable(cs->memory,
+ "cpu-memory"),
+ ARMASIdx_NS);
+#endif
+
qemu_init_vcpu(cs);
cpu_reset(cs);
@@ -1090,6 +1144,8 @@ static void cortex_a15_initfn(Object *obj)
cpu->id_pfr0 = 0x00001131;
cpu->id_pfr1 = 0x00011011;
cpu->id_dfr0 = 0x02010555;
+ cpu->pmceid0 = 0x00000000;
+ cpu->pmceid1 = 0x00000000;
cpu->id_afr0 = 0x00000000;
cpu->id_mmfr0 = 0x10201105;
cpu->id_mmfr1 = 0x20000000;
@@ -1369,6 +1425,17 @@ static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
}
#endif
+static gchar *arm_gdb_arch_name(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ return g_strdup("iwmmxt");
+ }
+ return g_strdup("arm");
+}
+
static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
@@ -1393,16 +1460,33 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
cc->do_interrupt = arm_cpu_do_interrupt;
- cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
+ cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+ cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
+ cc->asidx_from_attrs = arm_asidx_from_attrs;
cc->vmsd = &vmstate_arm_cpu;
- cc->virtio_is_big_endian = arm_cpu_is_big_endian;
+ cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
+ cc->write_elf64_note = arm_cpu_write_elf64_note;
+ cc->write_elf32_note = arm_cpu_write_elf32_note;
#endif
cc->gdb_num_core_regs = 26;
cc->gdb_core_xml_file = "arm-core.xml";
+ cc->gdb_arch_name = arm_gdb_arch_name;
cc->gdb_stop_before_watchpoint = true;
cc->debug_excp_handler = arm_debug_excp_handler;
+ cc->debug_check_watchpoint = arm_debug_check_watchpoint;
cc->disas_set_info = arm_disas_set_info;
+
+ /*
+ * Reason: arm_cpu_initfn() calls cpu_exec_init(), which saves
+ * the object in cpus -> dangling pointer after final
+ * object_unref().
+ *
+ * Once this is fixed, the devices that create ARM CPUs should be
+ * updated not to set cannot_destroy_with_object_finalize_yet,
+ * unless they still screw up something else.
+ */
+ dc->cannot_destroy_with_object_finalize_yet = true;
}
static void cpu_register(const ARMCPUInfo *info)
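
cp_reg_check_reset() above relies on reset being idempotent: resetting a register a second time must not change its value, otherwise two cpregs are resetting the same state field to different values. A minimal standalone sketch of that invariant, using a hypothetical struct cpreg rather than QEMU's ARMCPRegInfo:

    #include <assert.h>
    #include <stdint.h>

    struct cpreg {
        uint64_t value;
        uint64_t resetvalue;
    };

    static void cpreg_reset(struct cpreg *r)
    {
        r->value = r->resetvalue;
    }

    /* Run after a full reset pass: a second reset of each register must
     * be a no-op, or two reset hooks are fighting over one state field.
     */
    static void cpreg_check_reset(struct cpreg *r)
    {
        uint64_t oldvalue = r->value;

        cpreg_reset(r);
        assert(r->value == oldvalue);
    }

    int main(void)
    {
        struct cpreg r = { .value = 0, .resetvalue = 0xabcd };

        cpreg_reset(&r);        /* first pass, as arm_cpu_reset() does */
        cpreg_check_reset(&r);  /* assertion pass, as cp_reg_check_reset() */
        return 0;
    }
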
diff --git a/qemu/target-arm/cpu.h b/qemu/target-arm/cpu.h
index 7e89152bd..066ff678d 100644
--- a/qemu/target-arm/cpu.h
+++ b/qemu/target-arm/cpu.h
@@ -19,17 +19,14 @@
#ifndef CPU_ARM_H
#define CPU_ARM_H
-#include "config.h"
#include "kvm-consts.h"
#if defined(TARGET_AARCH64)
/* AArch64 definitions */
# define TARGET_LONG_BITS 64
-# define ELF_MACHINE EM_AARCH64
#else
# define TARGET_LONG_BITS 32
-# define ELF_MACHINE EM_ARM
#endif
#define TARGET_IS_BIENDIAN 1
@@ -56,6 +53,7 @@
#define EXCP_SMC 13 /* Secure Monitor Call */
#define EXCP_VIRQ 14
#define EXCP_VFIQ 15
+#define EXCP_SEMIHOST 16 /* semihosting call (A64 only) */
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -96,6 +94,7 @@
struct arm_boot_info;
#define NB_MMU_MODES 7
+#define TARGET_INSN_START_EXTRA_WORDS 1
/* We currently assume float and double are IEEE single and double
precision respectively.
@@ -113,7 +112,9 @@ typedef struct ARMGenericTimer {
#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
-#define NUM_GTIMERS 2
+#define GTIMER_HYP 2
+#define GTIMER_SEC 3
+#define NUM_GTIMERS 4
typedef struct {
uint64_t raw_tcr;
@@ -170,7 +171,7 @@ typedef struct CPUARMState {
uint32_t GE; /* cpsr[19:16] */
uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
- uint64_t daif; /* exception masks, in the bits they are in in PSTATE */
+ uint64_t daif; /* exception masks, in the bits they are in PSTATE */
uint64_t elr_el[4]; /* AArch64 exception link regs */
uint64_t sp_el[4]; /* AArch64 banked stack pointers */
@@ -219,10 +220,12 @@ typedef struct CPUARMState {
};
uint64_t ttbr1_el[4];
};
+ uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
/* MMU translation table base control. */
TCR tcr_el[4];
- uint32_t c2_data; /* MPU data cachable bits. */
- uint32_t c2_insn; /* MPU instruction cachable bits. */
+ TCR vtcr_el2; /* Virtualization Translation Control. */
+ uint32_t c2_data; /* MPU data cacheable bits. */
+ uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
* MPU write buffer control.
*/
@@ -275,6 +278,7 @@ typedef struct CPUARMState {
};
uint64_t far_el[4];
};
+ uint64_t hpfar_el2;
union { /* Translation result. */
struct {
uint64_t _unused_par_0;
@@ -358,6 +362,8 @@ typedef struct CPUARMState {
};
uint64_t c14_cntfrq; /* Counter Frequency register */
uint64_t c14_cntkctl; /* Timer Control register */
+ uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
+ uint64_t cntvoff_el2; /* Counter Virtual Offset register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
uint32_t c15_cpar; /* XScale Coprocessor Access Register */
uint32_t c15_ticonfig; /* TI925T configuration byte. */
@@ -373,11 +379,16 @@ typedef struct CPUARMState {
uint64_t dbgwvr[16]; /* watchpoint value registers */
uint64_t dbgwcr[16]; /* watchpoint control registers */
uint64_t mdscr_el1;
+ uint64_t oslsr_el1; /* OS Lock Status */
+ uint64_t mdcr_el2;
+ uint64_t mdcr_el3;
/* If the counter is enabled, this stores the last time the counter
* was reset. Otherwise it stores the counter value
*/
uint64_t c15_ccnt;
uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
+ uint64_t vpidr_el2; /* Virtualization Processor ID Register */
+ uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
} cp15;
struct {
@@ -467,9 +478,6 @@ typedef struct CPUARMState {
uint32_t cregs[16];
} iwmmxt;
- /* For mixed endian mode. */
- bool bswap_code;
-
#if defined(CONFIG_USER_ONLY)
/* For usermode syscall translation. */
int eabi;
@@ -500,7 +508,7 @@ typedef struct CPUARMState {
ARMCPU *cpu_arm_init(const char *cpu_model);
int cpu_arm_exec(CPUState *cpu);
-uint32_t do_arm_semihosting(CPUARMState *env);
+target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -583,6 +591,22 @@ void pmccntr_sync(CPUARMState *env);
#define CPTR_TTA (1U << 20)
#define CPTR_TFP (1U << 10)
+#define MDCR_EPMAD (1U << 21)
+#define MDCR_EDAD (1U << 20)
+#define MDCR_SPME (1U << 17)
+#define MDCR_SDD (1U << 16)
+#define MDCR_SPD (3U << 14)
+#define MDCR_TDRA (1U << 11)
+#define MDCR_TDOSA (1U << 10)
+#define MDCR_TDA (1U << 9)
+#define MDCR_TDE (1U << 8)
+#define MDCR_HPME (1U << 7)
+#define MDCR_TPM (1U << 6)
+#define MDCR_TPMCR (1U << 5)
+
+/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
+#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)
+
#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
@@ -695,8 +719,17 @@ static inline void pstate_write(CPUARMState *env, uint32_t val)
/* Return the current CPSR value. */
uint32_t cpsr_read(CPUARMState *env);
-/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
-void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask);
+
+typedef enum CPSRWriteType {
+ CPSRWriteByInstr = 0, /* from guest MSR or CPS */
+ CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
+ CPSRWriteRaw = 2, /* trust values, do not switch reg banks */
+ CPSRWriteByGDBStub = 3, /* from the GDB stub */
+} CPSRWriteType;
+
+/* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type);
/* Return the current xPSR value. */
static inline uint32_t xpsr_read(CPUARMState *env)
@@ -920,7 +953,7 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env)
if (arm_feature(env, ARM_FEATURE_EL3)) {
return !(env->cp15.scr_el3 & SCR_NS);
} else {
- /* If EL2 is not supported then the secure state is implementation
+ /* If EL3 is not supported then the secure state is implementation
* defined, in which case QEMU defaults to non-secure.
*/
return false;
@@ -958,18 +991,33 @@ static inline bool arm_is_secure(CPUARMState *env)
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
- /* We don't currently support EL2, and this isn't valid for EL0
- * (if we're in EL0, is_a64() is what you want, and if we're not in EL0
- * then the state of EL0 isn't well defined.)
+ /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+ * and if we're not in EL0 then the state of EL0 isn't well defined.)
*/
- assert(el == 1 || el == 3);
+ assert(el >= 1 && el <= 3);
+ bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
- /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This
- * is a QEMU-imposed simplification which we may wish to change later.
- * If we in future support EL2 and/or EL3, then the state of lower
- * exception levels is controlled by the HCR.RW and SCR.RW bits.
+ /* The highest exception level is always at the maximum supported
+ * register width, and then lower levels have a register width controlled
+ * by bits in the SCR or HCR registers.
*/
- return arm_feature(env, ARM_FEATURE_AARCH64);
+ if (el == 3) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
+ }
+
+ if (el == 2) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
+ aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+ }
+
+ return aa64;
}
/* Function for determining whether guest cp register reads and writes should
@@ -1008,11 +1056,11 @@ static inline bool access_secure_reg(CPUARMState *env)
*/
#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
A32_BANKED_REG_GET((_env), _regname, \
- ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))))
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
A32_BANKED_REG_SET((_env), _regname, \
- ((!arm_el_is_aa64((_env), 3) && arm_is_secure(_env))), \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
(_val))
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
@@ -1228,6 +1276,18 @@ static inline bool cptype_valid(int cptype)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)
+/* Return the highest implemented Exception Level */
+static inline int arm_highest_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return 3;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ return 2;
+ }
+ return 1;
+}
+
/* Return the current Exception Level (as per ARMv8; note that this differs
* from the ARMv7 Privilege Level).
*/
@@ -1280,6 +1340,14 @@ typedef enum CPAccessResult {
/* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
CP_ACCESS_TRAP_EL2 = 3,
CP_ACCESS_TRAP_EL3 = 4,
+ /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
+ /* Access fails and results in an exception syndrome for an FP access,
+ * trapped directly to EL2 or EL3
+ */
+ CP_ACCESS_TRAP_FP_EL2 = 7,
+ CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;
/* Access functions for coprocessor registers. These cannot fail and
@@ -1289,7 +1357,9 @@ typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
uint64_t value);
/* Access permission check functions for coprocessor registers. */
-typedef CPAccessResult CPAccessFn(CPUARMState *env, const ARMCPRegInfo *opaque);
+typedef CPAccessResult CPAccessFn(CPUARMState *env,
+ const ARMCPRegInfo *opaque,
+ bool isread);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
@@ -1445,6 +1515,9 @@ static inline bool cp_access_ok(int current_el,
return (ri->access >> ((current_el * 2) + isread)) & 1;
}
+/* Raw read of a coprocessor register (as needed for migration, etc) */
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
+
/**
* write_list_to_cpustate
* @cpu: ARMCPU
@@ -1477,7 +1550,7 @@ bool write_list_to_cpustate(ARMCPU *cpu);
*/
bool write_cpustate_to_list(ARMCPU *cpu);
-/* Does the core conform to the the "MicroController" profile. e.g. Cortex-M3.
+/* Does the core conform to the "MicroController" profile. e.g. Cortex-M3.
Note the M in older cores (eg. ARM7TDMI) stands for Multiply. These are
conventional cores (ie. Application or Realtime profile). */
@@ -1509,8 +1582,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
CPUARMState *env = cs->env_ptr;
unsigned int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
- uint32_t scr;
- uint32_t hcr;
bool pstate_unmasked;
int8_t unmasked = 0;
@@ -1524,31 +1595,10 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
switch (excp_idx) {
case EXCP_FIQ:
- /* If FIQs are routed to EL3 or EL2 then there are cases where we
- * override the CPSR.F in determining if the exception is masked or
- * not. If neither of these are set then we fall back to the CPSR.F
- * setting otherwise we further assess the state below.
- */
- hcr = (env->cp15.hcr_el2 & HCR_FMO);
- scr = (env->cp15.scr_el3 & SCR_FIQ);
-
- /* When EL3 is 32-bit, the SCR.FW bit controls whether the CPSR.F bit
- * masks FIQ interrupts when taken in non-secure state. If SCR.FW is
- * set then FIQs can be masked by CPSR.F when non-secure but only
- * when FIQs are only routed to EL3.
- */
- scr &= !((env->cp15.scr_el3 & SCR_FW) && !hcr);
pstate_unmasked = !(env->daif & PSTATE_F);
break;
case EXCP_IRQ:
- /* When EL3 execution state is 32-bit, if HCR.IMO is set then we may
- * override the CPSR.I masking when in non-secure state. The SCR.IRQ
- * setting has already been taken into consideration when setting the
- * target EL, so it does not have a further affect here.
- */
- hcr = (env->cp15.hcr_el2 & HCR_IMO);
- scr = false;
pstate_unmasked = !(env->daif & PSTATE_I);
break;
@@ -1573,8 +1623,58 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* interrupt.
*/
if ((target_el > cur_el) && (target_el != 1)) {
- if (arm_el_is_aa64(env, 3) || ((scr || hcr) && (!secure))) {
- unmasked = 1;
+ /* Exceptions targeting a higher EL may not be maskable */
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 64-bit masking rules are simple: exceptions to EL3
+ * can't be masked, and exceptions to EL2 can only be
+ * masked from Secure state. The HCR and SCR settings
+ * don't affect the masking logic, only the interrupt routing.
+ */
+ if (target_el == 3 || !secure) {
+ unmasked = 1;
+ }
+ } else {
+ /* The old 32-bit-only environment has a more complicated
+ * masking setup. HCR and SCR bits not only affect interrupt
+ * routing but also change the behaviour of masking.
+ */
+ bool hcr, scr;
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ /* If FIQs are routed to EL3 or EL2 then there are cases where
+ * we override the CPSR.F in determining if the exception is
+ * masked or not. If neither of these are set then we fall back
+ * to the CPSR.F setting otherwise we further assess the state
+ * below.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+ /* When EL3 is 32-bit, the SCR.FW bit controls whether the
+ * CPSR.F bit masks FIQ interrupts when taken in non-secure
+ * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+ * when non-secure but only when FIQs are only routed to EL3.
+ */
+ scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+ break;
+ case EXCP_IRQ:
+ /* When EL3 execution state is 32-bit, if HCR.IMO is set then
+ * we may override the CPSR.I masking when in non-secure state.
+ * The SCR.IRQ setting has already been taken into consideration
+ * when setting the target EL, so it does not have a further
+ * effect here.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ scr = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((scr || hcr) && !secure) {
+ unmasked = 1;
+ }
}
}
@@ -1587,7 +1687,6 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))
#define cpu_exec cpu_arm_exec
-#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
@@ -1667,7 +1766,7 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
}
/* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
int el = arm_current_el(env);
@@ -1677,16 +1776,43 @@ static inline int cpu_mmu_index(CPUARMState *env)
return el;
}
-/* Return the Exception Level targeted by debug exceptions;
- * currently always EL1 since we don't implement EL2 or EL3.
- */
+/* Indexes used when registering address spaces with cpu_address_space_init */
+typedef enum ARMASIdx {
+ ARMASIdx_NS = 0,
+ ARMASIdx_S = 1,
+} ARMASIdx;
+
+/* Return the Exception Level targeted by debug exceptions. */
static inline int arm_debug_target_el(CPUARMState *env)
{
- return 1;
+ bool secure = arm_is_secure(env);
+ bool route_to_el2 = false;
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
+ route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
+ env->cp15.mdcr_el2 & (1 << 8);
+ }
+
+ if (route_to_el2) {
+ return 2;
+ } else if (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) && secure) {
+ return 3;
+ } else {
+ return 1;
+ }
}
static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
+ if (arm_is_secure(env)) {
+ /* MDCR_EL3.SDD disables debug events from Secure state */
+ if (extract32(env->cp15.mdcr_el3, 16, 1) != 0
+ || arm_current_el(env) == 3) {
+ return false;
+ }
+ }
+
if (arm_current_el(env) == arm_debug_target_el(env)) {
if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
|| (env->daif & PSTATE_D)) {
@@ -1698,10 +1824,42 @@ static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
- if (arm_current_el(env) == 0 && arm_el_is_aa64(env, 1)) {
+ int el = arm_current_el(env);
+
+ if (el == 0 && arm_el_is_aa64(env, 1)) {
return aa64_generate_debug_exceptions(env);
}
- return arm_current_el(env) != 2;
+
+ if (arm_is_secure(env)) {
+ int spd;
+
+ if (el == 0 && (env->cp15.sder & 1)) {
+ /* SDER.SUIDEN means debug exceptions from Secure EL0
+ * are always enabled. Otherwise they are controlled by
+ * SDCR.SPD like those from other Secure ELs.
+ */
+ return true;
+ }
+
+ spd = extract32(env->cp15.mdcr_el3, 14, 2);
+ switch (spd) {
+ case 1:
+ /* SPD == 0b01 is reserved, but behaves as 0b00. */
+ case 0:
+ /* For 0b00 we return true if external secure invasive debug
+ * is enabled. On real hardware this is controlled by external
+ * signals to the core. QEMU always permits debug, and behaves
+ * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
+ */
+ return true;
+ case 2:
+ return false;
+ case 3:
+ return true;
+ }
+ }
+
+ return el != 2;
}
/* Return true if debugging exceptions are currently enabled.
@@ -1737,6 +1895,53 @@ static inline bool arm_singlestep_active(CPUARMState *env)
&& arm_generate_debug_exceptions(env);
}
+static inline bool arm_sctlr_b(CPUARMState *env)
+{
+ return
+ /* We need not implement SCTLR.ITD in user-mode emulation, so
+ * let linux-user ignore the fact that it conflicts with SCTLR_B.
+ * This lets people run BE32 binaries with "-cpu any".
+ */
+#ifndef CONFIG_USER_ONLY
+ !arm_feature(env, ARM_FEATURE_V7) &&
+#endif
+ (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+ int cur_el;
+
+ /* In 32bit endianness is determined by looking at CPSR's E bit */
+ if (!is_a64(env)) {
+ return
+#ifdef CONFIG_USER_ONLY
+ /* In system mode, BE32 is modelled in line with the
+ * architecture (as word-invariant big-endianness), where loads
+ * and stores are done little endian but from addresses which
+ * are adjusted by XORing with the appropriate constant. So the
+ * endianness to use for the raw data access is not affected by
+ * SCTLR.B.
+ * In user mode, however, we model BE32 as byte-invariant
+ * big-endianness (because user-only code cannot tell the
+ * difference), and so we need to use a data access endianness
+ * that depends on SCTLR.B.
+ */
+ arm_sctlr_b(env) ||
+#endif
+ ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
+ }
+
+ cur_el = arm_current_el(env);
+
+ if (cur_el == 0) {
+ return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
+ }
+
+ return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
+}
+
#include "exec/cpu-all.h"
/* Bit usage in the TB flags field: bit 31 indicates whether we are
@@ -1767,8 +1972,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
-#define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
-#define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
+#define ARM_TBFLAG_SCTLR_B_SHIFT 16
+#define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
* checks on the other bits at runtime
*/
@@ -1780,6 +1985,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
*/
#define ARM_TBFLAG_NS_SHIFT 19
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
+#define ARM_TBFLAG_BE_DATA_SHIFT 20
+#define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
/* Bit usage when in AArch64 state: currently we have no A64 specific bits */
@@ -1804,12 +2011,34 @@ static inline bool arm_singlestep_active(CPUARMState *env)
(((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
(((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
-#define ARM_TBFLAG_BSWAP_CODE(F) \
- (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
+#define ARM_TBFLAG_SCTLR_B(F) \
+ (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
(((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_NS(F) \
(((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
+#define ARM_TBFLAG_BE_DATA(F) \
+ (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
+
+static inline bool bswap_code(bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
+ * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
+ * would also end up as a mixed-endian mode with BE code, LE data.
+ */
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ sctlr_b;
+#else
+ /* All code access in ARM is little endian, and there are no loaders
+ * doing swaps that need to be reversed
+ */
+ return 0;
+#endif
+}
/* Return the exception level to which FP-disabled exceptions should
* be taken, or 0 if FP is enabled.
@@ -1876,6 +2105,17 @@ static inline int fp_exception_el(CPUARMState *env)
return 0;
}
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
@@ -1888,7 +2128,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
| (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
| (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
| (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
- | (env->bswap_code << ARM_TBFLAG_BSWAP_CODE_SHIFT);
+ | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
if (!(access_secure_reg(env))) {
*flags |= ARM_TBFLAG_NS_MASK;
}
@@ -1900,7 +2140,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
<< ARM_TBFLAG_XSCALE_CPAR_SHIFT);
}
- *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+ *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
@@ -1920,6 +2160,9 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
}
}
}
+ if (arm_cpu_data_is_big_endian(env)) {
+ *flags |= ARM_TBFLAG_BE_DATA_MASK;
+ }
*flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
*cs_base = 0;
@@ -1933,4 +2176,21 @@ enum {
QEMU_PSCI_CONDUIT_HVC = 2,
};
+#ifndef CONFIG_USER_ONLY
+/* Return the address space index to use for a memory access */
+static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
+}
+
+/* Return the AddressSpace to use for a memory access
+ * (which depends on whether the access is S or NS, and whether
+ * the board gave us a separate AddressSpace for S accesses).
+ */
+static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
+}
+#endif
+
#endif
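
The TB-flag changes above replace the BSWAP_CODE bit with SCTLR_B (bit 16) and add BE_DATA (bit 20). A standalone sketch (bit positions copied from the header; not part of the patch) of the pack/extract pattern that cpu_get_tb_cpu_state() and the ARM_TBFLAG_*() accessors use:

    #include <stdio.h>

    #define ARM_TBFLAG_SCTLR_B_SHIFT 16
    #define ARM_TBFLAG_SCTLR_B_MASK  (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
    #define ARM_TBFLAG_BE_DATA_SHIFT 20
    #define ARM_TBFLAG_BE_DATA_MASK  (1 << ARM_TBFLAG_BE_DATA_SHIFT)

    int main(void)
    {
        int flags = 0;
        int sctlr_b = 1;   /* stand-in for arm_sctlr_b(env) */
        int be_data = 0;   /* stand-in for arm_cpu_data_is_big_endian(env) */

        /* pack, as cpu_get_tb_cpu_state() does */
        flags |= sctlr_b << ARM_TBFLAG_SCTLR_B_SHIFT;
        if (be_data) {
            flags |= ARM_TBFLAG_BE_DATA_MASK;
        }

        /* extract, as the ARM_TBFLAG_*() accessors do */
        printf("SCTLR_B=%d BE_DATA=%d\n",
               (flags & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT,
               (flags & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT);
        return 0;
    }
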
diff --git a/qemu/target-arm/cpu64.c b/qemu/target-arm/cpu64.c
index 63c8b1cfa..1635debc1 100644
--- a/qemu/target-arm/cpu64.c
+++ b/qemu/target-arm/cpu64.c
@@ -18,6 +18,8 @@
* <http://www.gnu.org/licenses/gpl-2.0.html>
*/
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "cpu.h"
#include "qemu-common.h"
#if !defined(CONFIG_USER_ONLY)
@@ -108,6 +110,7 @@ static void aarch64_a57_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
cpu->midr = 0x411fd070;
cpu->revidr = 0x00000000;
@@ -133,6 +136,8 @@ static void aarch64_a57_initfn(Object *obj)
cpu->id_isar5 = 0x00011121;
cpu->id_aa64pfr0 = 0x00002222;
cpu->id_aa64dfr0 = 0x10305106;
+ cpu->pmceid0 = 0x00000000;
+ cpu->pmceid1 = 0x00000000;
cpu->id_aa64isar0 = 0x00011120;
cpu->id_aa64mmfr0 = 0x00001124;
cpu->dbgdidr = 0x3516d000;
@@ -160,6 +165,7 @@ static void aarch64_a53_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
cpu->midr = 0x410fd034;
cpu->revidr = 0x00000000;
@@ -286,19 +292,22 @@ static void aarch64_cpu_set_pc(CPUState *cs, vaddr value)
}
}
+static gchar *aarch64_gdb_arch_name(CPUState *cs)
+{
+ return g_strdup("aarch64");
+}
+
static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
{
CPUClass *cc = CPU_CLASS(oc);
-#if !defined(CONFIG_USER_ONLY)
- cc->do_interrupt = aarch64_cpu_do_interrupt;
-#endif
cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
cc->set_pc = aarch64_cpu_set_pc;
cc->gdb_read_register = aarch64_cpu_gdb_read_register;
cc->gdb_write_register = aarch64_cpu_gdb_write_register;
cc->gdb_num_core_regs = 34;
cc->gdb_core_xml_file = "aarch64-core.xml";
+ cc->gdb_arch_name = aarch64_gdb_arch_name;
}
static void aarch64_cpu_register(const ARMCPUInfo *info)
diff --git a/qemu/target-arm/crypto_helper.c b/qemu/target-arm/crypto_helper.c
index 5d2283806..3b6df3f41 100644
--- a/qemu/target-arm/crypto_helper.c
+++ b/qemu/target-arm/crypto_helper.c
@@ -9,7 +9,7 @@
* version 2 of the License, or (at your option) any later version.
*/
-#include <stdlib.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
diff --git a/qemu/target-arm/gdbstub.c b/qemu/target-arm/gdbstub.c
index 1c3439654..3ba9aadd4 100644
--- a/qemu/target-arm/gdbstub.c
+++ b/qemu/target-arm/gdbstub.c
@@ -17,7 +17,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/gdbstub.h"
@@ -94,7 +94,7 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
return 4;
case 25:
/* CPSR */
- cpsr_write(env, tmp, 0xffffffff);
+ cpsr_write(env, tmp, 0xffffffff, CPSRWriteByGDBStub);
return 4;
}
/* Unknown register. */
diff --git a/qemu/target-arm/gdbstub64.c b/qemu/target-arm/gdbstub64.c
index 8f3b8d177..634c6bc6f 100644
--- a/qemu/target-arm/gdbstub64.c
+++ b/qemu/target-arm/gdbstub64.c
@@ -16,7 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/gdbstub.h"
diff --git a/qemu/target-arm/helper-a64.c b/qemu/target-arm/helper-a64.c
index 08c95a3f5..c7bfb4d8f 100644
--- a/qemu/target-arm/helper-a64.c
+++ b/qemu/target-arm/helper-a64.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
@@ -70,20 +71,7 @@ uint32_t HELPER(clz32)(uint32_t x)
uint64_t HELPER(rbit64)(uint64_t x)
{
- /* assign the correct byte position */
- x = bswap64(x);
-
- /* assign the correct nibble position */
- x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
- | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);
-
- /* assign the correct bit position */
- x = ((x & 0x8888888888888888ULL) >> 3)
- | ((x & 0x4444444444444444ULL) >> 1)
- | ((x & 0x2222222222222222ULL) << 1)
- | ((x & 0x1111111111111111ULL) << 3);
-
- return x;
+ return revbit64(x);
}
/* Convert a softfloat float_relation_ (as returned by
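
The deleted body is not lost: revbit64() centralizes the same
divide-and-conquer reversal in include/qemu/bitops.h so any target can share
it. A sketch mirroring the removed code (the upstream helper may differ in
detail):

    static inline uint64_t revbit64_sketch(uint64_t x)
    {
        /* Reverse bytes, then nibbles within bytes, then bits in nibbles */
        x = bswap64(x);
        x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
          | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);
        x = ((x & 0x8888888888888888ULL) >> 3)
          | ((x & 0x4444444444444444ULL) >> 1)
          | ((x & 0x2222222222222222ULL) << 1)
          | ((x & 0x1111111111111111ULL) << 3);
        return x;
    }
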
@@ -455,92 +443,3 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
/* Linux crc32c converts the output to one's complement. */
return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
-
-#if !defined(CONFIG_USER_ONLY)
-
-/* Handle a CPU exception. */
-void aarch64_cpu_do_interrupt(CPUState *cs)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- unsigned int new_el = env->exception.target_el;
- target_ulong addr = env->cp15.vbar_el[new_el];
- unsigned int new_mode = aarch64_pstate_mode(new_el, true);
-
- if (arm_current_el(env) < new_el) {
- if (env->aarch64) {
- addr += 0x400;
- } else {
- addr += 0x600;
- }
- } else if (pstate_read(env) & PSTATE_SP) {
- addr += 0x200;
- }
-
- arm_log_exception(cs->exception_index);
- qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_el(env));
- if (qemu_loglevel_mask(CPU_LOG_INT)
- && !excp_is_internal(cs->exception_index)) {
- qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
- env->exception.syndrome);
- }
-
- if (arm_is_psci_call(cpu, cs->exception_index)) {
- arm_handle_psci_call(cpu);
- qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
- return;
- }
-
- switch (cs->exception_index) {
- case EXCP_PREFETCH_ABORT:
- case EXCP_DATA_ABORT:
- env->cp15.far_el[new_el] = env->exception.vaddress;
- qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
- env->cp15.far_el[new_el]);
- /* fall through */
- case EXCP_BKPT:
- case EXCP_UDEF:
- case EXCP_SWI:
- case EXCP_HVC:
- case EXCP_HYP_TRAP:
- case EXCP_SMC:
- env->cp15.esr_el[new_el] = env->exception.syndrome;
- break;
- case EXCP_IRQ:
- case EXCP_VIRQ:
- addr += 0x80;
- break;
- case EXCP_FIQ:
- case EXCP_VFIQ:
- addr += 0x100;
- break;
- default:
- cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
- }
-
- if (is_a64(env)) {
- env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
- aarch64_save_sp(env, arm_current_el(env));
- env->elr_el[new_el] = env->pc;
- } else {
- env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
- if (!env->thumb) {
- env->cp15.esr_el[new_el] |= 1 << 25;
- }
- env->elr_el[new_el] = env->regs[15];
-
- aarch64_sync_32_to_64(env);
-
- env->condexec_bits = 0;
- }
- qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
- env->elr_el[new_el]);
-
- pstate_write(env, PSTATE_DAIF | new_mode);
- env->aarch64 = 1;
- aarch64_restore_sp(env, new_el);
-
- env->pc = addr;
- cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
-}
-#endif
diff --git a/qemu/target-arm/helper.c b/qemu/target-arm/helper.c
index 01f0d0dac..09638b2e7 100644
--- a/qemu/target-arm/helper.c
+++ b/qemu/target-arm/helper.c
@@ -1,3 +1,4 @@
+#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/gdbstub.h"
@@ -11,12 +12,22 @@
#include "arm_ldst.h"
#include <zlib.h> /* For crc32 */
#include "exec/semihost.h"
+#include "sysemu/kvm.h"
+
+#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
-static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, uint32_t *fsr);
+static bool get_phys_addr(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi);
+
+static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr, uint32_t *fsr,
+ ARMMMUFaultInfo *fi);
/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD 0x8
@@ -144,7 +155,7 @@ static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
return (char *)env + ri->fieldoffset;
}
-static uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* Raw read of a coprocessor register (as needed for migration, etc). */
if (ri->type & ARM_CP_CONST) {
@@ -325,6 +336,127 @@ void init_cpreg_list(ARMCPU *cpu)
g_list_free(keys);
}
+/*
+ * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
+ * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
+ *
+ * access_el3_aa32ns: Used to check AArch32 register views.
+ * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
+ */
+static CPAccessResult access_el3_aa32ns(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ bool secure = arm_is_secure_below_el3(env);
+
+ assert(!arm_el_is_aa64(env, 3));
+ if (secure) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (!arm_el_is_aa64(env, 3)) {
+ return access_el3_aa32ns(env, ri, isread);
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Some secure-only AArch32 registers trap to EL3 if used from
+ * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
+ * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
+ * We assume that the .access field is set to PL1_RW.
+ */
+static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3) {
+ return CP_ACCESS_OK;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ /* This will be EL1 NS and EL2 NS, which just UNDEF */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
+
+/* Check for traps to "powerdown debug" registers, which are controlled
+ * by MDCR.TDOSA
+ */
+static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to "debug ROM" registers, which are controlled
+ * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
+ */
+static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to general debug registers, which are controlled
+ * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
+ */
+static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to performance monitor registers, which are controlled
+ * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
+ */
+static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
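
The four checkers above differ only in which MDCR bits they test (TDRA exists
only in MDCR_EL2; EL3 reuses the broader TDA bit). Purely to show the shared
shape, a hypothetical generic helper would look like this:

    static CPAccessResult access_mdcr_trap_sketch(CPUARMState *env,
                                                  uint64_t el2_bit,
                                                  uint64_t el3_bit)
    {
        int el = arm_current_el(env);

        /* EL2 traps apply only to non-secure EL0/EL1 */
        if (el < 2 && (env->cp15.mdcr_el2 & el2_bit)
            && !arm_is_secure_below_el3(env)) {
            return CP_ACCESS_TRAP_EL2;
        }
        /* EL3 traps apply to anything running below EL3 */
        if (el < 3 && (env->cp15.mdcr_el3 & el3_bit)) {
            return CP_ACCESS_TRAP_EL3;
        }
        return CP_ACCESS_OK;
    }
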
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -595,7 +727,8 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->cp15.cpacr_el1 = value;
}
-static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
if (arm_feature(env, ARM_FEATURE_V8)) {
/* Check if CPACR accesses are to be trapped to EL2 */
@@ -612,7 +745,8 @@ static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri)
return CP_ACCESS_OK;
}
-static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
/* Check if CPTR accesses are set to trap to EL3 */
if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
@@ -627,8 +761,12 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
{ .name = "MVA_prefetch",
.cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NOP },
+ /* We need to break the TB after ISB to execute self-modifying code
+ * correctly and also to take any pending interrupts immediately.
+ * So use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
+ */
{ .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
- .access = PL0_W, .type = ARM_CP_NOP },
+ .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
{ .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
.access = PL0_W, .type = ARM_CP_NOP },
{ .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
@@ -650,14 +788,26 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
REGINFO_SENTINEL
};
-static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
/* Performance monitor registers' user accessibility is controlled
- * by PMUSERENR.
+ * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
+ * trapping to EL2 or EL3 for other accesses.
*/
- if (arm_current_el(env) == 0 && !env->cp15.c9_pmuserenr) {
+ int el = arm_current_el(env);
+
+ if (el == 0 && !env->cp15.c9_pmuserenr) {
return CP_ACCESS_TRAP;
}
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
return CP_ACCESS_OK;
}
@@ -678,8 +828,8 @@ void pmccntr_sync(CPUARMState *env)
{
uint64_t temp_ticks;
- temp_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
- get_ticks_per_sec(), 1000000);
+ temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
@@ -717,8 +867,8 @@ static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
return env->cp15.c15_ccnt;
}
- total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
- get_ticks_per_sec(), 1000000);
+ total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
@@ -738,8 +888,8 @@ static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
- total_ticks = muldiv64(qemu_clock_get_us(QEMU_CLOCK_VIRTUAL),
- get_ticks_per_sec(), 1000000);
+ total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
if (env->cp15.c9_pmcr & PMCRD) {
/* Increment once every 64 processor clock cycles */
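
A worked example of the new conversion, assuming the fixed 1 GHz ARM_CPU_FREQ
above: one virtual-clock nanosecond maps to exactly one cycle, so muldiv64()
degenerates to the identity and PMCR.D simply divides the result by 64 (the
function is illustrative only):

    static uint64_t pmccntr_ticks_example(void)
    {
        uint64_t ns = 128000;   /* pretend the virtual clock reads 128 us */
        uint64_t ticks = muldiv64(ns, ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);

        /* ticks == 128000 at 1 GHz; with PMCR.D set: 128000 / 64 == 2000 */
        return ticks / 64;
    }
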
@@ -936,6 +1086,13 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.accessfn = pmreg_access,
.writefn = pmovsr_write,
.raw_writefn = raw_write },
+ { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
/* Unimplemented so WI. */
{ .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
.access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
@@ -973,19 +1130,30 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
.accessfn = pmreg_access },
{ .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
- .access = PL0_R | PL1_RW,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
.resetvalue = 0,
.writefn = pmuserenr_write, .raw_writefn = raw_write },
{ .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW,
+ .access = PL1_RW, .accessfn = access_tpm,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
.resetvalue = 0,
.writefn = pmintenset_write, .raw_writefn = raw_write },
{ .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
.writefn = pmintenclr_write, },
+ { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write },
{ .name = "VBAR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .writefn = vbar_write,
@@ -1022,6 +1190,10 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
.resetvalue = 0 },
+ { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
+ .resetvalue = 0 },
/* For non-long-descriptor page tables these are PRRR and NMRR;
* regardless they still act as reads-as-written for QEMU.
*/
@@ -1090,7 +1262,8 @@ static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
env->teecr = value;
}
-static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
if (arm_current_el(env) == 0 && (env->teecr & 1)) {
return CP_ACCESS_TRAP;
@@ -1143,57 +1316,132 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
-static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
- /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero */
- if (arm_current_el(env) == 0 && !extract32(env->cp15.c14_cntkctl, 0, 2)) {
- return CP_ACCESS_TRAP;
+ /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
+ * Writable only at the highest implemented exception level.
+ */
+ int el = arm_current_el(env);
+
+ switch (el) {
+ case 0:
+ if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
+ return CP_ACCESS_TRAP;
+ }
+ break;
+ case 1:
+ if (!isread && ri->state == ARM_CP_STATE_AA32 &&
+ arm_is_secure_below_el3(env)) {
+ /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ break;
+ case 2:
+ case 3:
+ break;
}
+
+ if (!isread && el < arm_highest_el(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+
return CP_ACCESS_OK;
}
-static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx)
+static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
+ bool isread)
{
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
/* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
- if (arm_current_el(env) == 0 &&
+ if (cur_el == 0 &&
!extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
return CP_ACCESS_TRAP;
}
+
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
+ timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
+ !extract32(env->cp15.cnthctl_el2, 0, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
return CP_ACCESS_OK;
}
-static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx)
+static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
+ bool isread)
{
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
/* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
* EL0[PV]TEN is zero.
*/
- if (arm_current_el(env) == 0 &&
+ if (cur_el == 0 &&
!extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
return CP_ACCESS_TRAP;
}
+
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
+ timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
+ !extract32(env->cp15.cnthctl_el2, 1, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
return CP_ACCESS_OK;
}
static CPAccessResult gt_pct_access(CPUARMState *env,
- const ARMCPRegInfo *ri)
+ const ARMCPRegInfo *ri,
+ bool isread)
{
- return gt_counter_access(env, GTIMER_PHYS);
+ return gt_counter_access(env, GTIMER_PHYS, isread);
}
static CPAccessResult gt_vct_access(CPUARMState *env,
- const ARMCPRegInfo *ri)
+ const ARMCPRegInfo *ri,
+ bool isread)
{
- return gt_counter_access(env, GTIMER_VIRT);
+ return gt_counter_access(env, GTIMER_VIRT, isread);
}
-static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
- return gt_timer_access(env, GTIMER_PHYS);
+ return gt_timer_access(env, GTIMER_PHYS, isread);
}
-static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
- return gt_timer_access(env, GTIMER_VIRT);
+ return gt_timer_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_stimer_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* The AArch64 register view of the secure physical timer is
+ * always accessible from EL3, and configurably accessible from
+ * Secure EL1.
+ */
+ switch (arm_current_el(env)) {
+ case 1:
+ if (!arm_is_secure(env)) {
+ return CP_ACCESS_TRAP;
+ }
+ if (!(env->cp15.scr_el3 & SCR_ST)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+ case 0:
+ case 2:
+ return CP_ACCESS_TRAP;
+ case 3:
+ return CP_ACCESS_OK;
+ default:
+ g_assert_not_reached();
+ }
}
static uint64_t gt_get_countervalue(CPUARMState *env)
@@ -1209,9 +1457,11 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
/* Timer enabled: calculate and set current ISTATUS, irq, and
* reset timer to when ISTATUS next has to change
*/
+ uint64_t offset = timeridx == GTIMER_VIRT ?
+ cpu->env.cp15.cntvoff_el2 : 0;
uint64_t count = gt_get_countervalue(&cpu->env);
/* Note that this must be unsigned 64 bit arithmetic: */
- int istatus = count >= gt->cval;
+ int istatus = count - offset >= gt->cval;
uint64_t nexttick;
gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
@@ -1222,7 +1472,7 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
nexttick = UINT64_MAX;
} else {
/* Next transition is when we hit cval */
- nexttick = gt->cval;
+ nexttick = gt->cval + offset;
}
/* Note that the desired next expiry time might be beyond the
* signed-64-bit range of a QEMUTimer -- in this case we just
@@ -1241,10 +1491,10 @@ static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
}
}
-static void gt_cnt_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
{
ARMCPU *cpu = arm_env_get_cpu(env);
- int timeridx = ri->opc1 & 1;
timer_del(cpu->gt_timer[timeridx]);
}
@@ -1254,38 +1504,44 @@ static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
return gt_get_countervalue(env);
}
+static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
+}
+
static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
uint64_t value)
{
- int timeridx = ri->opc1 & 1;
-
env->cp15.c14_timer[timeridx].cval = value;
gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
-static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
{
- int timeridx = ri->crm & 1;
+ uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
- gt_get_countervalue(env));
+ (gt_get_countervalue(env) - offset));
}
static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
uint64_t value)
{
- int timeridx = ri->crm & 1;
+ uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
- env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) +
+ env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
sextract64(value, 0, 32);
gt_recalc_timer(arm_env_get_cpu(env), timeridx);
}
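
To make the offset plumbing concrete, a hedged numeric example: suppose the
physical counter reads 1000 and CNTVOFF_EL2 is 300, so the virtual counter
stands at 700.

    static void gt_tval_offset_example(void)
    {
        uint64_t count = 1000, offset = 300;    /* virtual counter = 700 */
        uint64_t cval = (count - offset) + 50;  /* TVAL write of 50 -> 750 */
        uint32_t tval = (uint32_t)(cval - (count - offset));

        assert(tval == 50);  /* reads back 50 and counts down from there */
    }
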
static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
uint64_t value)
{
ARMCPU *cpu = arm_env_get_cpu(env);
- int timeridx = ri->crm & 1;
uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
@@ -1301,6 +1557,127 @@ static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
+static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_PHYS);
+}
+
+static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_PHYS, value);
+}
+
+static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_PHYS);
+}
+
+static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_PHYS, value);
+}
+
+static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_PHYS, value);
+}
+
+static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_VIRT);
+}
+
+static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_VIRT, value);
+}
+
+static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_VIRT);
+}
+
+static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_VIRT, value);
+}
+
+static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_VIRT, value);
+}
+
+static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ raw_write(env, ri, value);
+ gt_recalc_timer(cpu, GTIMER_VIRT);
+}
+
+static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_HYP);
+}
+
+static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_HYP, value);
+}
+
+static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_HYP);
+}
+
+static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_HYP, value);
+}
+
+static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_HYP, value);
+}
+
+static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_SEC);
+}
+
+static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_SEC, value);
+}
+
+static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_SEC);
+}
+
+static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_SEC, value);
+}
+
+static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_SEC, value);
+}
+
void arm_gt_ptimer_cb(void *opaque)
{
ARMCPU *cpu = opaque;
@@ -1315,6 +1692,20 @@ void arm_gt_vtimer_cb(void *opaque)
gt_recalc_timer(cpu, GTIMER_VIRT);
}
+void arm_gt_htimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_HYP);
+}
+
+void arm_gt_stimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_SEC);
+}
+
static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
/* Note that CNTFRQ is purely reads-as-written for the benefit
* of software; writing it doesn't actually change the timer frequency.
@@ -1340,11 +1731,21 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
},
/* per-timer control */
{ .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .secure = ARM_CP_SECSTATE_NS,
.type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
.accessfn = gt_ptimer_access,
.fieldoffset = offsetoflow32(CPUARMState,
cp15.c14_timer[GTIMER_PHYS].ctl),
- .writefn = gt_ctl_write, .raw_writefn = raw_write,
+ .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CTL(S)",
+ .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .secure = ARM_CP_SECSTATE_S,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_SEC].ctl),
+ .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
},
{ .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
@@ -1352,14 +1753,14 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.accessfn = gt_ptimer_access,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
.resetvalue = 0,
- .writefn = gt_ctl_write, .raw_writefn = raw_write,
+ .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
.type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
.fieldoffset = offsetoflow32(CPUARMState,
cp15.c14_timer[GTIMER_VIRT].ctl),
- .writefn = gt_ctl_write, .raw_writefn = raw_write,
+ .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
@@ -1367,30 +1768,38 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.accessfn = gt_vtimer_access,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
.resetvalue = 0,
- .writefn = gt_ctl_write, .raw_writefn = raw_write,
+ .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
},
/* TimerValue views: a 32 bit downcounting view of the underlying state */
{ .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .secure = ARM_CP_SECSTATE_NS,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
+ },
+ { .name = "CNTP_TVAL(S)",
+ .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .secure = ARM_CP_SECSTATE_S,
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_ptimer_access,
- .readfn = gt_tval_read, .writefn = gt_tval_write,
+ .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
},
{ .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
- .accessfn = gt_ptimer_access,
- .readfn = gt_tval_read, .writefn = gt_tval_write,
+ .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
+ .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
},
{ .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
- .readfn = gt_tval_read, .writefn = gt_tval_write,
+ .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
},
{ .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
.type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
- .accessfn = gt_vtimer_access,
- .readfn = gt_tval_read, .writefn = gt_tval_write,
+ .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
+ .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
},
/* The counter itself */
{ .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
@@ -1401,27 +1810,34 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
{ .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
.access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = gt_pct_access,
- .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
+ .accessfn = gt_pct_access, .readfn = gt_cnt_read,
},
{ .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
.access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
.accessfn = gt_vct_access,
- .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
},
{ .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
.access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = gt_vct_access,
- .readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
+ .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
},
/* Comparison value, indicating when the timer goes off */
{ .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
+ .secure = ARM_CP_SECSTATE_NS,
.access = PL1_RW | PL0_R,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
.accessfn = gt_ptimer_access,
- .writefn = gt_cval_write, .raw_writefn = raw_write,
+ .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
+ .secure = ARM_CP_SECSTATE_S,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
+ .accessfn = gt_ptimer_access,
+ .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
},
{ .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
@@ -1429,14 +1845,14 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
.resetvalue = 0, .accessfn = gt_ptimer_access,
- .writefn = gt_cval_write, .raw_writefn = raw_write,
+ .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
.access = PL1_RW | PL0_R,
.type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
.accessfn = gt_vtimer_access,
- .writefn = gt_cval_write, .raw_writefn = raw_write,
+ .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
@@ -1444,7 +1860,33 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
.resetvalue = 0, .accessfn = gt_vtimer_access,
- .writefn = gt_cval_write, .raw_writefn = raw_write,
+ .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
+ },
+ /* Secure timer -- this is actually restricted to only EL3
+ * and configurably Secure-EL1 via the accessfn.
+ */
+ { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .readfn = gt_sec_tval_read,
+ .writefn = gt_sec_tval_write,
+ .resetfn = gt_sec_timer_reset,
+ },
+ { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
+ .resetvalue = 0,
+ .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
+ .type = ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
+ .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
},
REGINFO_SENTINEL
};
@@ -1474,15 +1916,21 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */
-static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
if (ri->opc2 & 4) {
- /* Other states are only available with TrustZone; in
- * a non-TZ implementation these registers don't exist
- * at all, which is an Uncategorized trap. This underdecoding
- * is safe because the reginfo is NO_RAW.
+ /* The ATS12NSO* operations must trap to EL3 if executed in
+ * Secure EL1 (which can only happen if EL3 is AArch64).
+ * They are simply UNDEF if executed from NS EL1.
+ * They function normally from EL2 or EL3.
*/
- return CP_ACCESS_TRAP_UNCATEGORIZED;
+ if (arm_current_el(env) == 1) {
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
}
return CP_ACCESS_OK;
}
@@ -1497,9 +1945,10 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
bool ret;
uint64_t par64;
MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
ret = get_phys_addr(env, value, access_type, mmu_idx,
- &phys_addr, &attrs, &prot, &page_size, &fsr);
+ &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
if (extended_addresses_enabled(env)) {
/* fsr is a DFSR/IFSR value for the long descriptor
* translation table format, but with WnR always clear.
@@ -1602,6 +2051,26 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
A32_BANKED_CURRENT_REG_SET(env, par, par64);
}
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ uint64_t par64;
+
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -1629,10 +2098,10 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
break;
case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = ARMMMUIdx_S12NSE1;
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
break;
case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = ARMMMUIdx_S12NSE0;
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
break;
default:
g_assert_not_reached();
@@ -1649,6 +2118,7 @@ static const ARMCPRegInfo vapa_cp_reginfo[] = {
offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write },
#ifndef CONFIG_USER_ONLY
+ /* This underdecoding is safe because the reginfo is NO_RAW. */
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_W, .accessfn = ats_access,
.writefn = ats_write, .type = ARM_CP_NO_RAW },
@@ -1856,7 +2326,7 @@ static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
- /* Update the masks corresponding to the the TCR bank being written
+ /* Update the masks corresponding to the TCR bank being written
* Note that we always calculate mask and base_mask, but
* they are only used for short-descriptor tables (ie if EAE is 0);
* for long-descriptor tables the TCR fields are used differently
@@ -1918,6 +2388,20 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
raw_write(env, ri, value);
}
+static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
+ if (raw_read(env, ri) != value) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+ ARMMMUIdx_S2NS, -1);
+ raw_write(env, ri, value);
+ }
+}
+
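
The compare-before-flush above matters because QEMU's softmmu TLB entries
carry no VMID tag. A sketch of the motivation, assuming the ARMv8 layout
where the VMID occupies VTTBR[55:48]; note the real handler above is
stricter, flushing on any change of the register rather than only on a VMID
change:

    static void vttbr_vmid_sketch(CPUState *cs, CPUARMState *env,
                                  const ARMCPRegInfo *ri, uint64_t value)
    {
        uint8_t old_vmid = extract64(raw_read(env, ri), 48, 8);
        uint8_t new_vmid = extract64(value, 48, 8);

        if (old_vmid != new_vmid) {
            /* Untagged TLB: any NS EL1&0 or stage-2 entry may be stale */
            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
                                ARMMMUIdx_S2NS, -1);
        }
        raw_write(env, ri, value);
    }
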
static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_ALIAS,
@@ -2136,7 +2620,19 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
REGINFO_SENTINEL
};
-static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
+ return env->cp15.vpidr_el2;
+ }
+ return raw_read(env, ri);
+}
+
+static uint64_t mpidr_read_val(CPUARMState *env)
{
ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
uint64_t mpidr = cpu->mp_affinity;
@@ -2154,6 +2650,17 @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
return mpidr;
}
+static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
+ return env->cp15.vmpidr_el2;
+ }
+ return mpidr_read_val(env);
+}
+
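
Illustrative usage of the virtualized ID registers above, assuming an
EL2-capable CPU currently running a non-secure EL1 guest (the scenario and
values are invented):

    static void vmpidr_example(CPUARMState *env)
    {
        /* hypervisor presents affinity 0.0.0.3 to the guest */
        env->cp15.vmpidr_el2 = 0x80000003;

        /* with EL2 implemented, MPIDR_EL1 read at NS EL1 now returns it */
        assert(mpidr_read(env, NULL) == 0x80000003);
    }
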
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
{ .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
@@ -2210,7 +2717,8 @@ static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
vfp_set_fpsr(env, value);
}
-static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
return CP_ACCESS_TRAP;
@@ -2225,7 +2733,8 @@ static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
static CPAccessResult aa64_cacheop_access(CPUARMState *env,
- const ARMCPRegInfo *ri)
+ const ARMCPRegInfo *ri,
+ bool isread)
{
/* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
* SCTLR_EL1.UCI is set.
@@ -2240,69 +2749,249 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
* Page D4-1736 (DDI0487A.b)
*/
-static void tlbi_aa64_va_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+ }
+}
+
+static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ bool sec = arm_is_secure_below_el3(env);
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+ ARMMMUIdx_S2NS, -1);
+ } else {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+}
+
+static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
- /* Invalidate by VA (AArch64 version) */
ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
+}
+
+static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ bool sec = arm_is_secure_below_el3(env);
+ bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else if (has_el2) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
+ } else {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
+ }
+}
+
+static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
+ }
+}
+
+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page(CPU(cpu), pageaddr);
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
+ ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
}
-static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
- /* Invalidate by VA, all ASIDs (AArch64 version) */
+ /* Invalidate by VA, EL2
+ * Currently handles both VAE2 and VALE2, since we don't support
+ * flush-last-level-only.
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
uint64_t pageaddr = sextract64(value << 12, 0, 56);
- tlb_flush_page(CPU(cpu), pageaddr);
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
}
-static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- /* Invalidate by ASID (AArch64 version) */
+ /* Invalidate by VA, EL3
+ * Currently handles both VAE3 and VALE3, since we don't support
+ * flush-last-level-only.
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
- int asid = extract64(value, 48, 16);
- tlb_flush(CPU(cpu), asid == 0);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
}
-static void tlbi_aa64_va_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
+ bool sec = arm_is_secure_below_el3(env);
CPUState *other_cs;
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush_page(other_cs, pageaddr);
+ if (sec) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
+ ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
}
}
-static void tlbi_aa64_vaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
CPUState *other_cs;
uint64_t pageaddr = sextract64(value << 12, 0, 56);
CPU_FOREACH(other_cs) {
- tlb_flush_page(other_cs, pageaddr);
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
}
}
-static void tlbi_aa64_asid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
+static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
+ }
+}
+
+static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by IPA. This has to invalidate any structures that
+ * contain only stage 2 translation information, but does not need
+ * to apply to structures that contain combined stage 1 and stage 2
+ * translation information.
+ * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 48);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+}
+
+static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
{
CPUState *other_cs;
- int asid = extract64(value, 48, 16);
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 48);
CPU_FOREACH(other_cs) {
- tlb_flush(other_cs, asid == 0);
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
}
}
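
All of the handlers above follow one dispatch pattern: pick the mmu index set
for the translation regime being invalidated, then flush, with -1 terminating
tlb_flush_by_mmuidx()'s argument list exactly as in the calls above. A
minimal sketch of that pattern (the helper name is invented):

    static void example_flush_el1_regime(CPUState *cs, bool secure)
    {
        if (secure) {
            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
        } else {
            tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
        }
    }
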
-static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
/* We don't implement EL2, so the only control on DC ZVA is the
* bit in the SCTLR which can prohibit access for EL0.
@@ -2319,13 +3008,14 @@ static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
int dzp_bit = 1 << 4;
/* DZP indicates whether DC ZVA access is allowed */
- if (aa64_zva_access(env, NULL) == CP_ACCESS_OK) {
+ if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
dzp_bit = 0;
}
return cpu->dcz_blocksize | dzp_bit;
}
-static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
if (!(env->pstate & PSTATE_SP)) {
/* Access to SP_EL0 is undefined if it's being used as
@@ -2364,6 +3054,24 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
tlb_flush(CPU(cpu), 1);
}
+static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
+ return CP_ACCESS_TRAP_FP_EL2;
+ }
+ if (env->cp15.cptr_el[3] & CPTR_TFP) {
+ return CP_ACCESS_TRAP_FP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
+}
+
static const ARMCPRegInfo v8_cp_reginfo[] = {
/* Minimal set of EL0-visible registers. This will need to be expanded
* significantly for system emulation of AArch64 CPUs.
@@ -2434,62 +3142,86 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NOP },
/* TLBI operations */
- { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_write },
- { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_is_write },
{ .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_is_write },
+ .writefn = tlbi_aa64_vmalle1is_write },
{ .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_asid_is_write },
+ .writefn = tlbi_aa64_vmalle1is_write },
{ .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_is_write },
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbiall_write },
+ .writefn = tlbi_aa64_vmalle1_write },
{ .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_asid_write },
+ .writefn = tlbi_aa64_vmalle1_write },
{ .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_va_write },
+ .writefn = tlbi_aa64_vae1_write },
{ .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_RAW,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1_write },
+ { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
#ifndef CONFIG_USER_ONLY
/* 64 bit address translation operations */
{ .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
@@ -2504,6 +3236,31 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
.access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
+ .writefn = par_write },
#endif
/* TLB invalidate last level of translation table walk */
{ .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
@@ -2556,7 +3313,8 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[1]) },
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
/* We rely on the access checks not allowing the guest to write to the
* state field when SPSel indicates that it's being used as the stack
* pointer.
@@ -2574,6 +3332,49 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
.type = ARM_CP_NO_RAW,
.access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
+ { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
+ .access = PL2_RW, .accessfn = fpexc32_access },
+ { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
+ { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
+ { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
+ { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
+ { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
+ { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
+ { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
+ .resetvalue = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
+ { .name = "SDCR", .type = ARM_CP_ALIAS,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .writefn = sdcr_write,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
REGINFO_SENTINEL
};
@@ -2598,9 +3399,36 @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
{ .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
.opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 6, .crm = 2,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
@@ -2613,6 +3441,35 @@ static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
{ .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
.resetvalue = 0 },
+ { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -2646,31 +3503,22 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
.writefn = hcr_write },
- { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .resetvalue = 0,
- .writefn = dacr_write, .raw_writefn = raw_write,
- .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
.access = PL2_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[2]) },
{ .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
- { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
- .access = PL2_RW, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
{ .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
.access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[6]) },
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
{ .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
.access = PL2_RW, .writefn = vbar_write,
@@ -2692,11 +3540,50 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
.access = PL2_RW, .type = ARM_CP_ALIAS,
.fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
.access = PL2_RW, .writefn = vmsa_tcr_el1_write,
.resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
+ { .name = "VTCR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .type = ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL2_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
+ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 6, .crm = 2,
+ .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
+ .writefn = vttbr_write },
+ { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .writefn = vttbr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
{ .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
.access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
@@ -2715,18 +3602,129 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
.type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbiall_write },
+ .writefn = tlbi_aa64_alle2_write },
{ .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
.type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
{ .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
.type = ARM_CP_NO_RAW, .access = PL2_W,
- .writefn = tlbi_aa64_vaa_write },
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
+#ifndef CONFIG_USER_ONLY
+ /* Unlike the other EL2-related AT operations, these must
+ * UNDEF from EL3 if EL2 is not implemented, which is why we
+ * define them here rather than with the rest of the AT ops.
+ */
+ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+ * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+ * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+ * to behave as if SCR.NS was 1.
+ */
+ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
+ /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
+ * reset values as IMPDEF. We choose to reset to 3 to comply with
+ * both ARMv7 and ARMv8.
+ */
+ .access = PL2_RW, .resetvalue = 3,
+ .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
+ { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
+ .writefn = gt_cntvoff_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
+ { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
+ .writefn = gt_cntvoff_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
+ { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
+ .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
+ .resetfn = gt_hyp_timer_reset,
+ .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
+ { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
+ .resetvalue = 0,
+ .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
+#endif
+ /* The only field of MDCR_EL2 that has a defined architectural reset value
+ * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
+ * don't implement any PMU event counters, so using zero as a reset
+ * value for MDCR_EL2 is okay.
+ */
+ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
+ { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
+ { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
REGINFO_SENTINEL
};
+static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
+ * At Secure EL1 it traps to EL3.
+ */
+ if (arm_current_el(env) == 3) {
+ return CP_ACCESS_OK;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ /* Writes from NS EL1 and NS EL2 are UNDEF; reads are allowed. */
+ if (isread) {
+ return CP_ACCESS_OK;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
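+
+/* For reference, the access outcomes computed above are:
+ * EL3 -> CP_ACCESS_OK (RW)
+ * Secure EL1 -> CP_ACCESS_TRAP_EL3
+ * NS EL1 / NS EL2, reads -> CP_ACCESS_OK
+ * NS EL1 / NS EL2, writes -> CP_ACCESS_TRAP_UNCATEGORIZED (UNDEF)
+ */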
+
static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
@@ -2734,7 +3732,8 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.resetvalue = 0, .writefn = scr_write },
{ .name = "SCR", .type = ARM_CP_ALIAS,
.cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
- .access = PL3_RW, .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
.writefn = scr_write },
{ .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
@@ -2744,18 +3743,10 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
.access = PL3_RW, .resetvalue = 0,
.fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
- /* TODO: Implement NSACR trapping of secure EL1 accesses to EL3 */
- { .name = "NSACR", .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
- .access = PL3_W | PL1_R, .resetvalue = 0,
- .fieldoffset = offsetof(CPUARMState, cp15.nsacr) },
{ .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
- .access = PL3_RW, .writefn = vbar_write, .resetvalue = 0,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .writefn = vbar_write, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
- { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_ALIAS, /* reset handled by AArch32 view */
- .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
- .access = PL3_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
- .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]) },
{ .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
.access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
@@ -2771,7 +3762,6 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.access = PL3_RW,
.fieldoffset = offsetof(CPUARMState, elr_el[3]) },
{ .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
- .type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
.access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
{ .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
@@ -2780,7 +3770,8 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[7]) },
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
{ .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
.access = PL3_RW, .writefn = vbar_write,
@@ -2790,10 +3781,51 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
.opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
.access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
.fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
+ { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
+ { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3_write },
+ { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
+ { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
REGINFO_SENTINEL
};
-static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
+static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
/* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
* but the AArch32 CTR has its own reginfo struct)
@@ -2804,6 +3836,23 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
return CP_ACCESS_OK;
}
+static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Writes to OSLAR_EL1 may update the OS lock status, which can be
+ * read via a bit in OSLSR_EL1.
+ */
+ int oslock;
+
+ if (ri->state == ARM_CP_STATE_AA32) {
+ oslock = (value == 0xC5ACCE55);
+ } else {
+ oslock = value & 1;
+ }
+
+ env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
+}
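+
+/* For readers unfamiliar with deposit32() from include/qemu/bitops.h:
+ * deposit32(v, start, len, field) returns v with the len-bit field at
+ * bit position start replaced by field; oslar_write() uses it to update
+ * the single OSLK bit (bit 1) of OSLSR_EL1. A minimal equivalent sketch
+ * (illustrative only; the helper name is ours, not QEMU's):
+ */
+static inline uint32_t deposit32_sketch(uint32_t value, int start,
+ int length, uint32_t fieldval)
+{
+ uint32_t mask = (~0U >> (32 - length)) << start;
+ return (value & ~mask) | ((fieldval << start) & mask);
+}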
+
static const ARMCPRegInfo debug_cp_reginfo[] = {
/* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
* debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
@@ -2812,16 +3861,19 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
* accessor.
*/
{ .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL0_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
- .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL0_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
/* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
{ .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
- .access = PL1_RW,
+ .access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
.resetvalue = 0 },
/* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
@@ -2830,22 +3882,30 @@ static const ARMCPRegInfo debug_cp_reginfo[] = {
{ .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.type = ARM_CP_ALIAS,
- .access = PL1_R,
+ .access = PL1_R, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
- /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
{ .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
- .access = PL1_W, .type = ARM_CP_NOP },
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .accessfn = access_tdosa,
+ .writefn = oslar_write },
+ { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
+ .access = PL1_R, .resetvalue = 10,
+ .accessfn = access_tdosa,
+ .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
/* Dummy OSDLR_EL1: 32-bit Linux will read this */
{ .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
- .access = PL1_RW, .type = ARM_CP_NOP },
+ .access = PL1_RW, .accessfn = access_tdosa,
+ .type = ARM_CP_NOP },
/* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
* implement vector catch debug events yet.
*/
{ .name = "DBGVCR",
.cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_NOP },
+ .access = PL1_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
REGINFO_SENTINEL
};
@@ -3110,7 +4170,8 @@ static void define_debug_regs(ARMCPU *cpu)
int wrps, brps, ctx_cmps;
ARMCPRegInfo dbgdidr = {
.name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
+ .access = PL0_R, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
};
/* Note that all these register fields hold "number of Xs minus 1". */
@@ -3141,13 +4202,13 @@ static void define_debug_regs(ARMCPU *cpu)
ARMCPRegInfo dbgregs[] = {
{ .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
- .access = PL1_RW,
+ .access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
.writefn = dbgbvr_write, .raw_writefn = raw_write
},
{ .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
- .access = PL1_RW,
+ .access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
.writefn = dbgbcr_write, .raw_writefn = raw_write
},
@@ -3160,13 +4221,13 @@ static void define_debug_regs(ARMCPU *cpu)
ARMCPRegInfo dbgregs[] = {
{ .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
- .access = PL1_RW,
+ .access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
.writefn = dbgwvr_write, .raw_writefn = raw_write
},
{ .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
- .access = PL1_RW,
+ .access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
.writefn = dbgwcr_write, .raw_writefn = raw_write
},
@@ -3252,12 +4313,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_isar5 },
- /* 6..7 are as yet unallocated and must RAZ */
- { .name = "ID_ISAR6", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = 0 },
- { .name = "ID_ISAR7", .cp = 15, .crn = 0, .crm = 2,
- .opc1 = 0, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST,
+ { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_mmfr4 },
+ /* 7 is as yet unallocated and must RAZ */
+ { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = 0 },
REGINFO_SENTINEL
};
@@ -3311,7 +4374,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V8)) {
- /* AArch64 ID registers, which all have impdef reset values */
+ /* AArch64 ID registers, which all have impdef reset values.
+ * Note that within the ID register ranges the unused slots
+ * must all RAZ, not UNDEF; future architecture versions may
+ * define new registers here.
+ */
ARMCPRegInfo v8_idregs[] = {
{ .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
@@ -3321,6 +4388,30 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_aa64pfr1},
+ { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -3334,6 +4425,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_aa64dfr1 },
+ { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -3342,6 +4441,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_aa64afr1 },
+ { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -3350,6 +4457,30 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_aa64isar1 },
+ { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -3358,6 +4489,30 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_aa64mmfr1 },
+ { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
{ .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -3370,6 +4525,42 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->mvfr2 },
+ { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid1 },
+ { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid1 },
REGINFO_SENTINEL
};
/* RVBAR_EL1 is only implemented if EL1 is the highest EL */
@@ -3386,6 +4577,30 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, v8_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_EL2)) {
+ uint64_t vmpidr_def = mpidr_read_val(env);
+ ARMCPRegInfo vpidr_regs[] = {
+ { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .resetvalue = vmpidr_def,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
+ { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW,
+ .resetvalue = vmpidr_def,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vpidr_regs);
define_arm_cp_regs(cpu, el2_cp_reginfo);
/* RVBAR_EL2 is only implemented if EL2 is the highest EL */
if (!arm_feature(env, ARM_FEATURE_EL3)) {
@@ -3401,18 +4616,82 @@ void register_cp_regs_for_features(ARMCPU *cpu)
* register the no_el2 reginfos.
*/
if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
+ * of MIDR_EL1 and MPIDR_EL1.
+ */
+ ARMCPRegInfo vpidr_regs[] = {
+ { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_CONST, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_NO_RAW,
+ .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vpidr_regs);
define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
}
}
if (arm_feature(env, ARM_FEATURE_EL3)) {
define_arm_cp_regs(cpu, el3_cp_reginfo);
- ARMCPRegInfo rvbar = {
- .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
- .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar
+ ARMCPRegInfo el3_regs[] = {
+ { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
+ { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL3_RW,
+ .raw_writefn = raw_write, .writefn = sctlr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
+ .resetvalue = cpu->reset_sctlr },
+ REGINFO_SENTINEL
};
- define_one_arm_cp_reg(cpu, &rvbar);
+
+ define_arm_cp_regs(cpu, el3_regs);
}
+ /* The behaviour of NSACR is sufficiently various that we don't
+ * try to describe it in a single reginfo:
+ * if EL3 is 64 bit, then trap to EL3 from S EL1,
+ * reads as constant 0xc00 from NS EL1 and NS EL2
+ * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
+ * if v7 without EL3, register doesn't exist
+ * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR", .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .accessfn = nsacr_access,
+ .resetvalue = 0xc00
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ } else {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR",
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL3_RW | PL1_R,
+ .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ }
+ } else {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR", .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL1_R,
+ .resetvalue = 0xc00
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ }
+ }
+
if (arm_feature(env, ARM_FEATURE_MPU)) {
if (arm_feature(env, ARM_FEATURE_V6)) {
/* PMSAv6 not implemented */
@@ -3478,6 +4757,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_R, .resetvalue = cpu->midr,
.writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
+ .readfn = midr_read,
.fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
.type = ARM_CP_OVERRIDE },
/* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
@@ -3501,7 +4781,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
{ .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
- .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
+ .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
+ .readfn = midr_read },
/* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
{ .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
.cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
@@ -3584,13 +4866,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
}
if (arm_feature(env, ARM_FEATURE_AUXCR)) {
- ARMCPRegInfo auxcr = {
- .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
- .access = PL1_RW, .type = ARM_CP_CONST,
- .resetvalue = cpu->reset_auxcr
+ ARMCPRegInfo auxcr_reginfo[] = {
+ { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST,
+ .resetvalue = cpu->reset_auxcr },
+ { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
};
- define_one_arm_cp_reg(cpu, &auxcr);
+ define_arm_cp_regs(cpu, auxcr_reginfo);
}
if (arm_feature(env, ARM_FEATURE_CBAR)) {
@@ -4064,23 +5355,47 @@ void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
/* Helper coprocessor reset function for do-nothing-on-reset registers */
}
-static int bad_mode_switch(CPUARMState *env, int mode)
+static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
{
/* Return true if it is not valid for us to switch to
* this CPU mode (ie all the UNPREDICTABLE cases in
* the ARM ARM CPSRWriteByInstr pseudocode).
*/
+
+ /* Changes to or from Hyp via MSR and CPS are illegal. */
+ if (write_type == CPSRWriteByInstr &&
+ ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
+ mode == ARM_CPU_MODE_HYP)) {
+ return 1;
+ }
+
switch (mode) {
case ARM_CPU_MODE_USR:
+ return 0;
case ARM_CPU_MODE_SYS:
case ARM_CPU_MODE_SVC:
case ARM_CPU_MODE_ABT:
case ARM_CPU_MODE_UND:
case ARM_CPU_MODE_IRQ:
case ARM_CPU_MODE_FIQ:
+ /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
+ * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
+ */
+ /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
+ * and CPS are treated as illegal mode changes.
+ */
+ if (write_type == CPSRWriteByInstr &&
+ (env->cp15.hcr_el2 & HCR_TGE) &&
+ (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
+ !arm_is_secure_below_el3(env)) {
+ return 1;
+ }
return 0;
+ case ARM_CPU_MODE_HYP:
+ return !arm_feature(env, ARM_FEATURE_EL2)
+ || arm_current_el(env) < 2 || arm_is_secure(env);
case ARM_CPU_MODE_MON:
- return !arm_is_secure(env);
+ return arm_current_el(env) < 3;
default:
return 1;
}
@@ -4097,7 +5412,8 @@ uint32_t cpsr_read(CPUARMState *env)
| (env->GE << 16) | (env->daif & CPSR_AIF);
}
-void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type)
{
uint32_t changed_daif;
@@ -4131,7 +5447,7 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
* In a V8 implementation, it is permitted for privileged software to
* change the CPSR A/F bits regardless of the SCR.AW/FW bits.
*/
- if (!arm_feature(env, ARM_FEATURE_V8) &&
+ if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
arm_feature(env, ARM_FEATURE_EL3) &&
!arm_feature(env, ARM_FEATURE_EL2) &&
!arm_is_secure(env)) {
@@ -4178,13 +5494,31 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
env->daif &= ~(CPSR_AIF & mask);
env->daif |= val & CPSR_AIF & mask;
- if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
- if (bad_mode_switch(env, val & CPSR_M)) {
- /* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
- * We choose to ignore the attempt and leave the CPSR M field
- * untouched.
+ if (write_type != CPSRWriteRaw &&
+ ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
+ /* Note that we can only get here in USR mode if this is a
+ * gdb stub write; for this case we follow the architectural
+ * behaviour for guest writes in USR mode of ignoring an attempt
+ * to switch mode. (Those are caught by translate.c for writes
+ * triggered by guest instructions.)
*/
mask &= ~CPSR_M;
+ } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
+ /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
+ * v7, and has defined behaviour in v8:
+ * + leave CPSR.M untouched
+ * + allow changes to the other CPSR fields
+ * + set PSTATE.IL
+ * For user changes via the GDB stub, we don't set PSTATE.IL,
+ * as this would be unnecessarily harsh for a user error.
+ */
+ mask &= ~CPSR_M;
+ if (write_type != CPSRWriteByGDBStub &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ mask |= CPSR_IL;
+ val |= CPSR_IL;
+ }
} else {
switch_mode(env, val & CPSR_M);
}
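
/* Example of the v8 behaviour implemented above: an MSR that attempts
 * to switch from SVC to an invalid mode leaves CPSR.M unchanged, still
 * applies the other requested field updates, and sets PSTATE.IL; a GDB
 * stub write in the same situation skips the PSTATE.IL part.
 */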
@@ -4233,17 +5567,7 @@ uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
uint32_t HELPER(rbit)(uint32_t x)
{
- x = ((x & 0xff000000) >> 24)
- | ((x & 0x00ff0000) >> 8)
- | ((x & 0x0000ff00) << 8)
- | ((x & 0x000000ff) << 24);
- x = ((x & 0xf0f0f0f0) >> 4)
- | ((x & 0x0f0f0f0f) << 4);
- x = ((x & 0x88888888) >> 3)
- | ((x & 0x44444444) >> 1)
- | ((x & 0x22222222) << 1)
- | ((x & 0x11111111) << 3);
- return x;
+ return revbit32(x);
}
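
/* revbit32() comes from include/qemu/host-utils.h and reverses the bit
 * order of a 32-bit word, which is exactly what the deleted open-coded
 * sequence above computed. A stand-alone equivalent sketch (the helper
 * name is ours, not QEMU's):
 */
static inline uint32_t revbit32_sketch(uint32_t x)
{
/* swap adjacent bits, then bit pairs, then nibbles... */
x = ((x & 0x55555555u) << 1) | ((x >> 1) & 0x55555555u);
x = ((x & 0x33333333u) << 2) | ((x >> 2) & 0x33333333u);
x = ((x & 0x0f0f0f0fu) << 4) | ((x >> 4) & 0x0f0f0f0fu);
/* ...then reverse the four bytes */
x = (x << 24) | ((x & 0xff00u) << 8) | ((x >> 8) & 0xff00u) | (x >> 24);
return x;
}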
#if defined(CONFIG_USER_ONLY)
@@ -4273,21 +5597,6 @@ void switch_mode(CPUARMState *env, int mode)
}
}
-void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
-{
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- cpu_abort(CPU(cpu), "banked r13 write\n");
-}
-
-uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
-{
- ARMCPU *cpu = arm_env_get_cpu(env);
-
- cpu_abort(CPU(cpu), "banked r13 read\n");
- return 0;
-}
-
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure)
{
@@ -4301,31 +5610,6 @@ void aarch64_sync_64_to_32(CPUARMState *env)
#else
-/* Map CPU modes onto saved register banks. */
-int bank_number(int mode)
-{
- switch (mode) {
- case ARM_CPU_MODE_USR:
- case ARM_CPU_MODE_SYS:
- return 0;
- case ARM_CPU_MODE_SVC:
- return 1;
- case ARM_CPU_MODE_ABT:
- return 2;
- case ARM_CPU_MODE_UND:
- return 3;
- case ARM_CPU_MODE_IRQ:
- return 4;
- case ARM_CPU_MODE_FIQ:
- return 5;
- case ARM_CPU_MODE_HYP:
- return 6;
- case ARM_CPU_MODE_MON:
- return 7;
- }
- hw_error("bank number requested for bad CPSR mode value 0x%x\n", mode);
-}
-
void switch_mode(CPUARMState *env, int mode)
{
int old_mode;
@@ -4391,7 +5675,7 @@ void switch_mode(CPUARMState *env, int mode)
* BIT IRQ IMO Non-secure Secure
* EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
*/
-const int8_t target_el_table[2][2][2][2][2][4] = {
+static const int8_t target_el_table[2][2][2][2][2][4] = {
{{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
{/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
{{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
@@ -4417,11 +5701,22 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure)
{
CPUARMState *env = cs->env_ptr;
- int rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ int rw;
int scr;
int hcr;
int target_el;
- int is64 = arm_el_is_aa64(env, 3);
+ /* Is the highest EL AArch64? */
+ int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ } else {
+ /* Either EL2 is the highest EL (and so the EL2 register width
+ * is given by is64); or there is no EL2 or EL3, in which case
+ * the value of 'rw' does not affect the table lookup anyway.
+ */
+ rw = is64;
+ }
switch (excp_idx) {
case EXCP_IRQ:
@@ -4558,11 +5853,13 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
case EXCP_BKPT:
if (semihosting_enabled()) {
int nr;
- nr = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
+ nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
if (nr == 0xab) {
env->regs[15] += 2;
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%x\n",
+ env->regs[0]);
env->regs[0] = do_arm_semihosting(env);
- qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
return;
}
}
@@ -4655,35 +5952,35 @@ void aarch64_sync_32_to_64(CPUARMState *env)
}
if (mode == ARM_CPU_MODE_IRQ) {
- env->xregs[16] = env->regs[13];
- env->xregs[17] = env->regs[14];
+ env->xregs[16] = env->regs[14];
+ env->xregs[17] = env->regs[13];
} else {
- env->xregs[16] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
- env->xregs[17] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
}
if (mode == ARM_CPU_MODE_SVC) {
- env->xregs[18] = env->regs[13];
- env->xregs[19] = env->regs[14];
+ env->xregs[18] = env->regs[14];
+ env->xregs[19] = env->regs[13];
} else {
- env->xregs[18] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
- env->xregs[19] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
}
if (mode == ARM_CPU_MODE_ABT) {
- env->xregs[20] = env->regs[13];
- env->xregs[21] = env->regs[14];
+ env->xregs[20] = env->regs[14];
+ env->xregs[21] = env->regs[13];
} else {
- env->xregs[20] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
- env->xregs[21] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
}
if (mode == ARM_CPU_MODE_UND) {
- env->xregs[22] = env->regs[13];
- env->xregs[23] = env->regs[14];
+ env->xregs[22] = env->regs[14];
+ env->xregs[23] = env->regs[13];
} else {
- env->xregs[22] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
- env->xregs[23] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
}
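/* Mapping used above, following the ARM ARM AArch32<->AArch64 register
 * correspondence: X16/X17 hold LR_irq/SP_irq, X18/X19 LR_svc/SP_svc,
 * X20/X21 LR_abt/SP_abt and X22/X23 LR_und/SP_und; the replaced lines
 * had each LR/SP pair the wrong way round.
 */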
/* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
@@ -4760,35 +6057,35 @@ void aarch64_sync_64_to_32(CPUARMState *env)
}
if (mode == ARM_CPU_MODE_IRQ) {
- env->regs[13] = env->xregs[16];
- env->regs[14] = env->xregs[17];
+ env->regs[14] = env->xregs[16];
+ env->regs[13] = env->xregs[17];
} else {
- env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
- env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
+ env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+ env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
}
if (mode == ARM_CPU_MODE_SVC) {
- env->regs[13] = env->xregs[18];
- env->regs[14] = env->xregs[19];
+ env->regs[14] = env->xregs[18];
+ env->regs[13] = env->xregs[19];
} else {
- env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
- env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
+ env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+ env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
}
if (mode == ARM_CPU_MODE_ABT) {
- env->regs[13] = env->xregs[20];
- env->regs[14] = env->xregs[21];
+ env->regs[14] = env->xregs[20];
+ env->regs[13] = env->xregs[21];
} else {
- env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
- env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
+ env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+ env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
}
if (mode == ARM_CPU_MODE_UND) {
- env->regs[13] = env->xregs[22];
- env->regs[14] = env->xregs[23];
+ env->regs[14] = env->xregs[22];
+ env->regs[13] = env->xregs[23];
} else {
- env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
- env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
+ env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+ env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
}
/* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
@@ -4810,8 +6107,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
env->regs[15] = env->pc;
}
-/* Handle a CPU exception. */
-void arm_cpu_do_interrupt(CPUState *cs)
+static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -4821,16 +6117,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
uint32_t offset;
uint32_t moe;
- assert(!IS_M(env));
-
- arm_log_exception(cs->exception_index);
-
- if (arm_is_psci_call(cpu, cs->exception_index)) {
- arm_handle_psci_call(cpu);
- qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
- return;
- }
-
/* If this is a debug exception we must update the DBGDSCR.MOE bits */
switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
case EC_BREAKPOINT:
@@ -4868,25 +6154,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
offset = 4;
break;
case EXCP_SWI:
- if (semihosting_enabled()) {
- /* Check for semihosting interrupt. */
- if (env->thumb) {
- mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
- & 0xff;
- } else {
- mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
- & 0xffffff;
- }
- /* Only intercept calls from privileged modes, to provide some
- semblance of security. */
- if (((mask == 0x123456 && !env->thumb)
- || (mask == 0xab && env->thumb))
- && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
- env->regs[0] = do_arm_semihosting(env);
- qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
- return;
- }
- }
new_mode = ARM_CPU_MODE_SVC;
addr = 0x08;
mask = CPSR_I;
@@ -4894,17 +6161,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
offset = 0;
break;
case EXCP_BKPT:
- /* See if this is a semihosting syscall. */
- if (env->thumb && semihosting_enabled()) {
- mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
- if (mask == 0xab
- && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
- env->regs[15] += 2;
- env->regs[0] = do_arm_semihosting(env);
- qemu_log_mask(CPU_LOG_INT, "...handled as semihosting call\n");
- return;
- }
- }
env->exception.fsr = 2;
/* Fall through to prefetch abort. */
case EXCP_PREFETCH_ABORT:
@@ -4990,6 +6246,11 @@ void arm_cpu_do_interrupt(CPUState *cs)
env->condexec_bits = 0;
/* Switch to the new mode, and to the correct instruction set. */
env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
+ /* Set new mode endianness */
+ env->uncached_cpsr &= ~CPSR_E;
+ if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+ env->uncached_cpsr |= CPSR_E;
+ }
env->daif |= mask;
/* this is a lie, as there was no c1_sys on V4T/V5, but who cares
* and we should just guard the thumb mode on V4 */
@@ -4998,9 +6259,227 @@ void arm_cpu_do_interrupt(CPUState *cs)
}
env->regs[14] = env->regs[15] + offset;
env->regs[15] = addr;
- cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
+/* Handle exception entry to a target EL which is using AArch64 */
+static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int new_el = env->exception.target_el;
+ target_ulong addr = env->cp15.vbar_el[new_el];
+ unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+
+ if (arm_current_el(env) < new_el) {
+ /* Entry vector offset depends on whether the implemented EL
+ * immediately lower than the target level is using AArch32 or AArch64
+ */
+ bool is_aa64;
+
+ switch (new_el) {
+ case 3:
+ is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
+ break;
+ case 2:
+ is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
+ break;
+ case 1:
+ is_aa64 = is_a64(env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_aa64) {
+ addr += 0x400;
+ } else {
+ addr += 0x600;
+ }
+ } else if (pstate_read(env) & PSTATE_SP) {
+ addr += 0x200;
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ env->cp15.far_el[new_el] = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
+ env->cp15.far_el[new_el]);
+ /* fall through */
+ case EXCP_BKPT:
+ case EXCP_UDEF:
+ case EXCP_SWI:
+ case EXCP_HVC:
+ case EXCP_HYP_TRAP:
+ case EXCP_SMC:
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
+ break;
+ case EXCP_IRQ:
+ case EXCP_VIRQ:
+ addr += 0x80;
+ break;
+ case EXCP_FIQ:
+ case EXCP_VFIQ:
+ addr += 0x100;
+ break;
+ case EXCP_SEMIHOST:
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%" PRIx64 "\n",
+ env->xregs[0]);
+ env->xregs[0] = do_arm_semihosting(env);
+ return;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ }
+
+ if (is_a64(env)) {
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+ aarch64_save_sp(env, arm_current_el(env));
+ env->elr_el[new_el] = env->pc;
+ } else {
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
+ if (!env->thumb) {
+ env->cp15.esr_el[new_el] |= 1 << 25;
+ }
+ env->elr_el[new_el] = env->regs[15];
+
+ aarch64_sync_32_to_64(env);
+
+ env->condexec_bits = 0;
+ }
+ qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
+ env->elr_el[new_el]);
+
+ pstate_write(env, PSTATE_DAIF | new_mode);
+ env->aarch64 = 1;
+ aarch64_restore_sp(env, new_el);
+
+ env->pc = addr;
+
+ qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
+ new_el, env->pc, pstate_read(env));
+}
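To make the vector arithmetic concrete (offsets taken from the code above): an IRQ taken from EL1 to EL1 with SP_EL1 selected (PSTATE.SP set) enters at VBAR_EL1 + 0x200 + 0x80 = VBAR_EL1 + 0x280. The same IRQ arriving from AArch64 EL0 uses the lower-EL AArch64 vectors at VBAR_EL1 + 0x400 + 0x80, and from AArch32 EL0 the lower-EL AArch32 vectors at VBAR_EL1 + 0x600 + 0x80; FIQs add 0x100 instead of 0x80, and synchronous exceptions add nothing beyond the base offset.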
+
+static inline bool check_for_semihosting(CPUState *cs)
+{
+ /* Check whether this exception is a semihosting call; if so
+ * then handle it and return true; otherwise return false.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (is_a64(env)) {
+ if (cs->exception_index == EXCP_SEMIHOST) {
+ /* This is always the 64-bit semihosting exception.
+ * The "is this usermode" and "is semihosting enabled"
+ * checks have been done at translate time.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%" PRIx64 "\n",
+ env->xregs[0]);
+ env->xregs[0] = do_arm_semihosting(env);
+ return true;
+ }
+ return false;
+ } else {
+ uint32_t imm;
+
+ /* Only intercept calls from privileged modes, to provide some
+ * semblance of security.
+ */
+ if (!semihosting_enabled() ||
+ ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)) {
+ return false;
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_SWI:
+ /* Check for semihosting interrupt. */
+ if (env->thumb) {
+ imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
+ & 0xff;
+ if (imm == 0xab) {
+ break;
+ }
+ } else {
+ imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
+ & 0xffffff;
+ if (imm == 0x123456) {
+ break;
+ }
+ }
+ return false;
+ case EXCP_BKPT:
+ /* See if this is a semihosting syscall. */
+ if (env->thumb) {
+ imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
+ & 0xff;
+ if (imm == 0xab) {
+ env->regs[15] += 2;
+ break;
+ }
+ }
+ return false;
+ default:
+ return false;
+ }
+
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%x\n",
+ env->regs[0]);
+ env->regs[0] = do_arm_semihosting(env);
+ return true;
+ }
+}
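As a worked example of the checks above: a Thumb-state svc 0xab or bkpt 0xab issued from a privileged mode is diverted to do_arm_semihosting(), as is an ARM-state svc 0x123456; the same instructions executed in User mode, or with semihosting disabled, fall through to the ordinary exception path.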
+
+/* Handle a CPU exception for A and R profile CPUs.
+ * Do any appropriate logging, handle PSCI calls, and then hand off
+ * to the AArch64-entry or AArch32-entry function depending on the
+ * target exception level's register width.
+ */
+void arm_cpu_do_interrupt(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int new_el = env->exception.target_el;
+
+ assert(!IS_M(env));
+
+ arm_log_exception(cs->exception_index);
+ qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
+ new_el);
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && !excp_is_internal(cs->exception_index)) {
+ qemu_log_mask(CPU_LOG_INT, "...with ESR %x/0x%" PRIx32 "\n",
+ env->exception.syndrome >> ARM_EL_EC_SHIFT,
+ env->exception.syndrome);
+ }
+
+ if (arm_is_psci_call(cpu, cs->exception_index)) {
+ arm_handle_psci_call(cpu);
+ qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
+ return;
+ }
+
+ /* Semihosting semantics depend on the register width of the
+ * code that caused the exception, not the target exception level,
+ * so must be handled here.
+ */
+ if (check_for_semihosting(cs)) {
+ return;
+ }
+
+ assert(!excp_is_internal(cs->exception_index));
+ if (arm_el_is_aa64(env, new_el)) {
+ arm_cpu_do_interrupt_aarch64(cs);
+ } else {
+ arm_cpu_do_interrupt_aarch32(cs);
+ }
+
+ if (!kvm_enabled()) {
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+}
/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
@@ -5058,12 +6537,17 @@ static inline bool regime_translation_disabled(CPUARMState *env,
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
+static inline bool regime_translation_big_endian(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
+}
+
/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
if (mmu_idx == ARMMMUIdx_S2NS) {
- /* TODO: return VTCR_EL2 */
- g_assert_not_reached();
+ return &env->cp15.vtcr_el2;
}
return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}
@@ -5073,8 +6557,7 @@ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
int ttbrn)
{
if (mmu_idx == ARMMMUIdx_S2NS) {
- /* TODO: return VTTBR_EL2 */
- g_assert_not_reached();
+ return env->cp15.vttbr_el2;
}
if (ttbrn == 0) {
return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
@@ -5098,6 +6581,18 @@ static inline bool regime_using_lpae_format(CPUARMState *env,
return false;
}
+/* Returns true if the stage 1 translation regime is using LPAE format page
+ * tables. Used when raising alignment exceptions, whose FSR changes depending
+ * on whether the long or short descriptor format is in use. */
+bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
+
+ return regime_using_lpae_format(env, mmu_idx);
+}
+
static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
{
switch (mmu_idx) {
@@ -5196,6 +6691,28 @@ simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
+/* Translate S2 section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @s2ap: The 2-bit stage2 access permissions (S2AP)
+ * @xn: XN (execute-never) bit
+ */
+static int get_S2prot(CPUARMState *env, int s2ap, int xn)
+{
+ int prot = 0;
+
+ if (s2ap & 1) {
+ prot |= PAGE_READ;
+ }
+ if (s2ap & 2) {
+ prot |= PAGE_WRITE;
+ }
+ if (!xn) {
+ prot |= PAGE_EXEC;
+ }
+ return prot;
+}
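A minimal standalone sketch of the S2AP/XN decode above, useful for eyeballing the truth table; the PAGE_* values here are illustrative stand-ins rather than QEMU's definitions:

    #include <stdio.h>

    enum { PAGE_READ = 1, PAGE_WRITE = 2, PAGE_EXEC = 4 };

    /* Mirror of get_S2prot()'s decode: S2AP bit 0 grants read,
     * bit 1 grants write, and a clear XN bit grants execute. */
    static int s2prot(int s2ap, int xn)
    {
        int prot = 0;
        if (s2ap & 1) { prot |= PAGE_READ; }
        if (s2ap & 2) { prot |= PAGE_WRITE; }
        if (!xn)      { prot |= PAGE_EXEC; }
        return prot;
    }

    int main(void)
    {
        printf("S2AP=3 XN=0 -> %d (r/w/x)\n", s2prot(3, 0));     /* 7 */
        printf("S2AP=1 XN=1 -> %d (read-only)\n", s2prot(1, 1)); /* 1 */
        return 0;
    }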
+
/* Translate section/page access permissions to protection flags
*
* @env: CPUARMState
@@ -5300,6 +6817,32 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
return true;
}
+/* Translate a S1 pagetable walk through S2 if needed. */
+static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
+ hwaddr addr, MemTxAttrs txattrs,
+ uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
+ !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
+ target_ulong s2size;
+ hwaddr s2pa;
+ int s2prot;
+ int ret;
+
+ ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
+ &txattrs, &s2prot, &s2size, fsr, fi);
+ if (ret) {
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ return ~0;
+ }
+ addr = s2pa;
+ }
+ return addr;
+}
+
/* All loads done in the course of a page table walk go through here.
* TODO: rather than ignoring errors from physical memory reads (which
* are external aborts in ARM terminology) we should propagate this
@@ -5307,26 +6850,55 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
* was being done for a CPU load/store or an address translation instruction
* (but not if it was for a debug access).
*/
-static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure)
+static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
MemTxAttrs attrs = {};
+ AddressSpace *as;
attrs.secure = is_secure;
- return address_space_ldl(cs->as, addr, attrs, NULL);
+ as = arm_addressspace(cs, attrs);
+ addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ return address_space_ldl_be(as, addr, attrs, NULL);
+ } else {
+ return address_space_ldl_le(as, addr, attrs, NULL);
+ }
}
-static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure)
+static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
MemTxAttrs attrs = {};
+ AddressSpace *as;
attrs.secure = is_secure;
- return address_space_ldq(cs->as, addr, attrs, NULL);
+ as = arm_addressspace(cs, attrs);
+ addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ return address_space_ldq_be(as, addr, attrs, NULL);
+ } else {
+ return address_space_ldq_le(as, addr, attrs, NULL);
+ }
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
- target_ulong *page_size, uint32_t *fsr)
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
int code;
@@ -5346,7 +6918,8 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
code = 5;
goto do_fault;
}
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
if (regime_el(env, mmu_idx) == 1) {
@@ -5382,7 +6955,8 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
/* Fine pagetable. */
table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
}
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
switch (desc & 3) {
case 0: /* Page translation fault. */
code = 7;
@@ -5439,7 +7013,8 @@ do_fault:
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, uint32_t *fsr)
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
int code;
@@ -5462,7 +7037,8 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
code = 5;
goto do_fault;
}
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
type = (desc & 3);
if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
/* Section translation fault, or attempt to use the encoding
@@ -5513,7 +7089,8 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
ns = extract32(desc, 3, 1);
/* Lookup l2 entry. */
table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
- desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx));
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
switch (desc & 3) {
case 0: /* Page translation fault. */
@@ -5587,17 +7164,87 @@ typedef enum {
permission_fault = 3,
} MMUFaultType;
+/*
+ * check_s2_mmu_setup
+ * @cpu: ARMCPU
+ * @is_aa64: True if the translation regime is in AArch64 state
+ * @level: Suggested starting level
+ * @inputsize: Bitsize of IPAs
+ * @stride: Page-table stride (See the ARM ARM)
+ *
+ * Returns true if the suggested S2 translation parameters are OK and
+ * false otherwise.
+ */
+static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
+ int inputsize, int stride)
+{
+ const int grainsize = stride + 3;
+ int startsizecheck;
+
+ /* Negative levels are never allowed. */
+ if (level < 0) {
+ return false;
+ }
+
+ startsizecheck = inputsize - ((3 - level) * stride + grainsize);
+ if (startsizecheck < 1 || startsizecheck > stride + 4) {
+ return false;
+ }
+
+ if (is_aa64) {
+ CPUARMState *env = &cpu->env;
+ unsigned int pamax = arm_pamax(cpu);
+
+ switch (stride) {
+ case 13: /* 64KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 42)) {
+ return false;
+ }
+ break;
+ case 11: /* 16KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 40)) {
+ return false;
+ }
+ break;
+ case 9: /* 4KB Pages. */
+ if (level == 0 && pamax <= 42) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Inputsize checks. */
+ if (inputsize > pamax &&
+ (arm_el_is_aa64(env, 1) || inputsize > 40)) {
+ /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
+ return false;
+ }
+ } else {
+ /* AArch32 only supports 4KB pages. Assert on that. */
+ assert(stride == 9);
+
+ if (level == 0) {
+ return false;
+ }
+ }
+ return true;
+}
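A worked instance of the start-size check (numbers chosen for illustration): with 4KB pages, stride = 9 and grainsize = 12, so a suggested starting level of 1 against a 40-bit IPA gives startsizecheck = 40 - ((3 - 1) * 9 + 12) = 10, inside the allowed [1, 13] range and therefore accepted; a suggested level of 2 gives 40 - 21 = 19 and is rejected.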
+
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
int access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
- target_ulong *page_size_ptr, uint32_t *fsr)
+ target_ulong *page_size_ptr, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
- CPUState *cs = CPU(arm_env_get_cpu(env));
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
/* Read an LPAE long-descriptor translation table. */
MMUFaultType fault_type = translation_fault;
- uint32_t level = 1;
- uint32_t epd;
- int32_t tsz;
+ uint32_t level;
+ uint32_t epd = 0;
+ int32_t t0sz, t1sz;
uint32_t tg;
uint64_t ttbr;
int ttbr_select;
@@ -5605,13 +7252,15 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
uint32_t tableattrs;
target_ulong page_size;
uint32_t attrs;
- int32_t granule_sz = 9;
- int32_t va_size = 32;
+ int32_t stride = 9;
+ int32_t va_size;
+ int inputsize;
int32_t tbi = 0;
TCR *tcr = regime_tcr(env, mmu_idx);
int ap, ns, xn, pxn;
uint32_t el = regime_el(env, mmu_idx);
bool ttbr1_valid = true;
+ uint64_t descaddrmask;
/* TODO:
* This code does not handle the different format TCR for VTCR_EL2.
@@ -5620,9 +7269,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
* support for those page table walks.
*/
if (arm_el_is_aa64(env, el)) {
+ level = 0;
va_size = 64;
if (el > 1) {
- tbi = extract64(tcr->raw_tcr, 20, 1);
+ if (mmu_idx != ARMMMUIdx_S2NS) {
+ tbi = extract64(tcr->raw_tcr, 20, 1);
+ }
} else {
if (extract64(address, 55, 1)) {
tbi = extract64(tcr->raw_tcr, 38, 1);
@@ -5638,6 +7290,13 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
if (el > 1) {
ttbr1_valid = false;
}
+ } else {
+ level = 1;
+ va_size = 32;
+ /* There is no TTBR1 for EL2 */
+ if (el == 2) {
+ ttbr1_valid = false;
+ }
}
/* Determine whether this address is in the region controlled by
@@ -5645,12 +7304,28 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
* This is a Non-secure PL0/1 stage 1 translation, so controlled by
* TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
*/
- uint32_t t0sz = extract32(tcr->raw_tcr, 0, 6);
if (va_size == 64) {
+ /* AArch64 translation. */
+ t0sz = extract32(tcr->raw_tcr, 0, 6);
t0sz = MIN(t0sz, 39);
t0sz = MAX(t0sz, 16);
+ } else if (mmu_idx != ARMMMUIdx_S2NS) {
+ /* AArch32 stage 1 translation. */
+ t0sz = extract32(tcr->raw_tcr, 0, 3);
+ } else {
+ /* AArch32 stage 2 translation. */
+ bool sext = extract32(tcr->raw_tcr, 4, 1);
+ bool sign = extract32(tcr->raw_tcr, 3, 1);
+ t0sz = sextract32(tcr->raw_tcr, 0, 4);
+
+ /* If the sign-extend bit is not the same as t0sz[3], the result
+ * is unpredictable. Flag this as a guest error. */
+ if (sign != sext) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "AArch32: VTCR.S / VTCR.T0SZ[3] missmatch\n");
+ }
}
- uint32_t t1sz = extract32(tcr->raw_tcr, 16, 6);
+ t1sz = extract32(tcr->raw_tcr, 16, 6);
if (va_size == 64) {
t1sz = MIN(t1sz, 39);
t1sz = MAX(t1sz, 16);
@@ -5683,15 +7358,17 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
*/
if (ttbr_select == 0) {
ttbr = regime_ttbr(env, mmu_idx, 0);
- epd = extract32(tcr->raw_tcr, 7, 1);
- tsz = t0sz;
+ if (el < 2) {
+ epd = extract32(tcr->raw_tcr, 7, 1);
+ }
+ inputsize = va_size - t0sz;
tg = extract32(tcr->raw_tcr, 14, 2);
if (tg == 1) { /* 64KB pages */
- granule_sz = 13;
+ stride = 13;
}
if (tg == 2) { /* 16KB pages */
- granule_sz = 11;
+ stride = 11;
}
} else {
/* We should only be here if TTBR1 is valid */
@@ -5699,19 +7376,19 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
ttbr = regime_ttbr(env, mmu_idx, 1);
epd = extract32(tcr->raw_tcr, 23, 1);
- tsz = t1sz;
+ inputsize = va_size - t1sz;
tg = extract32(tcr->raw_tcr, 30, 2);
if (tg == 3) { /* 64KB pages */
- granule_sz = 13;
+ stride = 13;
}
if (tg == 1) { /* 16KB pages */
- granule_sz = 11;
+ stride = 11;
}
}
/* Here we should have set up all the parameters for the translation:
- * va_size, ttbr, epd, tsz, granule_sz, tbi
+ * va_size, inputsize, ttbr, epd, stride, tbi
*/
if (epd) {
@@ -5721,32 +7398,67 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
goto do_fault;
}
- /* The starting level depends on the virtual address size (which can be
- * up to 48 bits) and the translation granule size. It indicates the number
- * of strides (granule_sz bits at a time) needed to consume the bits
- * of the input address. In the pseudocode this is:
- * level = 4 - RoundUp((inputsize - grainsize) / stride)
- * where their 'inputsize' is our 'va_size - tsz', 'grainsize' is
- * our 'granule_sz + 3' and 'stride' is our 'granule_sz'.
- * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
- * = 4 - (va_size - tsz - granule_sz - 3 + granule_sz - 1) / granule_sz
- * = 4 - (va_size - tsz - 4) / granule_sz;
- */
- level = 4 - (va_size - tsz - 4) / granule_sz;
+ if (mmu_idx != ARMMMUIdx_S2NS) {
+ /* The starting level depends on the virtual address size (which can
+ * be up to 48 bits) and the translation granule size. It indicates
+ * the number of strides (stride bits at a time) needed to
+ * consume the bits of the input address. In the pseudocode this is:
+ * level = 4 - RoundUp((inputsize - grainsize) / stride)
+ * where their 'inputsize' is our 'inputsize', 'grainsize' is
+ * our 'stride + 3' and 'stride' is our 'stride'.
+ * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
+ * = 4 - (inputsize - stride - 3 + stride - 1) / stride
+ * = 4 - (inputsize - 4) / stride;
+ */
+ level = 4 - (inputsize - 4) / stride;
+ } else {
+ /* For stage 2 translations the starting level is specified by the
+ * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
+ */
+ uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
+ uint32_t startlevel;
+ bool ok;
+
+ if (va_size == 32 || stride == 9) {
+ /* AArch32 or 4KB pages */
+ startlevel = 2 - sl0;
+ } else {
+ /* 16KB or 64KB pages */
+ startlevel = 3 - sl0;
+ }
+
+ /* Check that the starting level is valid. */
+ ok = check_s2_mmu_setup(cpu, va_size == 64, startlevel,
+ inputsize, stride);
+ if (!ok) {
+ fault_type = translation_fault;
+ goto do_fault;
+ }
+ level = startlevel;
+ }
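Plugging numbers into the stage 1 formula (granule and size values assumed for illustration): a 4KB granule (stride 9) with inputsize = 39 gives level = 4 - (39 - 4) / 9 = 4 - 3 = 1, while a 64KB granule (stride 13) with inputsize = 42 gives level = 4 - (42 - 4) / 13 = 4 - 2 = 2.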
/* Clear the vaddr bits which aren't part of the within-region address,
* so that we don't have to special case things when calculating the
* first descriptor address.
*/
- if (tsz) {
- address &= (1ULL << (va_size - tsz)) - 1;
+ if (va_size != inputsize) {
+ address &= (1ULL << inputsize) - 1;
}
- descmask = (1ULL << (granule_sz + 3)) - 1;
+ descmask = (1ULL << (stride + 3)) - 1;
/* Now we can extract the actual base address from the TTBR */
descaddr = extract64(ttbr, 0, 48);
- descaddr &= ~((1ULL << (va_size - tsz - (granule_sz * (4 - level)))) - 1);
+ descaddr &= ~((1ULL << (inputsize - (stride * (4 - level)))) - 1);
+
+ /* The address field in the descriptor goes up to bit 39 for ARMv7
+ * but up to bit 47 for ARMv8.
+ */
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ descaddrmask = 0xfffffffff000ULL;
+ } else {
+ descaddrmask = 0xfffffff000ULL;
+ }
/* Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
@@ -5758,16 +7470,20 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
uint64_t descriptor;
bool nstable;
- descaddr |= (address >> (granule_sz * (4 - level))) & descmask;
+ descaddr |= (address >> (stride * (4 - level))) & descmask;
descaddr &= ~7ULL;
nstable = extract32(tableattrs, 4, 1);
- descriptor = arm_ldq_ptw(cs, descaddr, !nstable);
+ descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
+ if (fi->s1ptw) {
+ goto do_fault;
+ }
+
if (!(descriptor & 1) ||
(!(descriptor & 2) && (level == 3))) {
/* Invalid, or the Reserved level 3 encoding */
goto do_fault;
}
- descaddr = descriptor & 0xfffffff000ULL;
+ descaddr = descriptor & descaddrmask;
if ((descriptor & 2) && (level < 3)) {
/* Table entry. The top five bits are attributes which may
@@ -5783,11 +7499,17 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
* These are basically the same thing, although the number
* of bits we pull in from the vaddr varies.
*/
- page_size = (1ULL << ((granule_sz * (4 - level)) + 3));
+ page_size = (1ULL << ((stride * (4 - level)) + 3));
descaddr |= (address & (page_size - 1));
- /* Extract attributes from the descriptor and merge with table attrs */
+ /* Extract attributes from the descriptor */
attrs = extract64(descriptor, 2, 10)
| (extract64(descriptor, 52, 12) << 10);
+
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ /* Stage 2 table descriptors do not include any attribute fields */
+ break;
+ }
+ /* Merge in attributes from table descriptors */
attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
/* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
@@ -5809,11 +7531,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
}
ap = extract32(attrs, 4, 2);
- ns = extract32(attrs, 3, 1);
xn = extract32(attrs, 12, 1);
- pxn = extract32(attrs, 11, 1);
- *prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn);
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ ns = true;
+ *prot = get_S2prot(env, ap, xn);
+ } else {
+ ns = extract32(attrs, 3, 1);
+ pxn = extract32(attrs, 11, 1);
+ *prot = get_S1prot(env, mmu_idx, va_size == 64, ap, ns, xn, pxn);
+ }
fault_type = permission_fault;
if (!(*prot & (1 << access_type))) {
@@ -5834,6 +7561,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
do_fault:
/* Long-descriptor format IFSR/DFSR value */
*fsr = (1 << 9) | (fault_type << 2) | level;
+ /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
+ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
return true;
}
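For reference, this encoding places the fault status type in bits [5:2] and the level in bits [1:0], with bit 9 flagging the long-descriptor format; assuming translation_fault encodes as 1 (consistent with the enum ending at permission_fault = 3), a level 2 translation fault produces *fsr = (1 << 9) | (1 << 2) | 2 = 0x206.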
@@ -6096,20 +7825,45 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
* @page_size: set to the size of the page containing phys_ptr
* @fsr: set to the DFSR/IFSR value on failure
*/
-static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
- target_ulong *page_size, uint32_t *fsr)
+static bool get_phys_addr(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
- /* TODO: when we support EL2 we should here call ourselves recursively
- * to do the stage 1 and then stage 2 translations. The arm_ld*_ptw
- * functions will also need changing to perform ARMMMUIdx_S2NS loads
- * rather than direct physical memory loads when appropriate.
- * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ /* Call ourselves recursively to do the stage 1 and then stage 2
+ * translations.
*/
- assert(!arm_feature(env, ARM_FEATURE_EL2));
- mmu_idx += ARMMMUIdx_S1NSE0;
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ hwaddr ipa;
+ int s2_prot;
+ int ret;
+
+ ret = get_phys_addr(env, address, access_type,
+ mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
+ prot, page_size, fsr, fi);
+
+ /* If S1 fails or S2 is disabled, return early. */
+ if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
+ *phys_ptr = ipa;
+ return ret;
+ }
+
+ /* S1 is done. Now do S2 translation. */
+ ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
+ phys_ptr, attrs, &s2_prot,
+ page_size, fsr, fi);
+ fi->s2addr = ipa;
+ /* Combine the S1 and S2 perms. */
+ *prot &= s2_prot;
+ return ret;
+ } else {
+ /*
+ * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ */
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
}
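Concretely (addresses invented for illustration): a guest VA might resolve through stage 1 to IPA 0x80123000, which the recursive stage 2 lookup then maps to PA 0x40123000; because the permission sets are intersected with *prot &= s2_prot, an S1 read-write mapping behind a read-only S2 entry ends up read-only.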
/* The page table entries may downgrade secure to non-secure, but
@@ -6158,13 +7912,13 @@ static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr);
+ attrs, prot, page_size, fsr, fi);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr);
+ attrs, prot, page_size, fsr, fi);
} else {
return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
- prot, page_size, fsr);
+ prot, page_size, fsr, fi);
}
}
@@ -6173,7 +7927,8 @@ static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
* fsr with ARM DFSR/IFSR fault register format value on failure.
*/
bool arm_tlb_fill(CPUState *cs, vaddr address,
- int access_type, int mmu_idx, uint32_t *fsr)
+ int access_type, int mmu_idx, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -6184,7 +7939,7 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
MemTxAttrs attrs = {};
ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
- &attrs, &prot, &page_size, fsr);
+ &attrs, &prot, &page_size, fsr, fi);
if (!ret) {
/* Map a single [sub]page. */
phys_addr &= TARGET_PAGE_MASK;
@@ -6197,7 +7952,8 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
return ret;
}
-hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
@@ -6206,36 +7962,19 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
int prot;
bool ret;
uint32_t fsr;
- MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
- ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
- &attrs, &prot, &page_size, &fsr);
+ *attrs = (MemTxAttrs) {};
+
+ ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
+ attrs, &prot, &page_size, &fsr, &fi);
if (ret) {
return -1;
}
-
return phys_addr;
}
-void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
-{
- if ((env->uncached_cpsr & CPSR_M) == mode) {
- env->regs[13] = val;
- } else {
- env->banked_r13[bank_number(mode)] = val;
- }
-}
-
-uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
-{
- if ((env->uncached_cpsr & CPSR_M) == mode) {
- return env->regs[13];
- } else {
- return env->banked_r13[bank_number(mode)];
- }
-}
-
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
ARMCPU *cpu = arm_env_get_cpu(env);
@@ -6373,7 +8112,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
void *hostaddr[maxidx];
int try, i;
- unsigned mmu_idx = cpu_mmu_index(env);
+ unsigned mmu_idx = cpu_mmu_index(env, false);
TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
for (try = 0; try < 2; try++) {
diff --git a/qemu/target-arm/helper.h b/qemu/target-arm/helper.h
index 827b33dfe..84aa63762 100644
--- a/qemu/target-arm/helper.h
+++ b/qemu/target-arm/helper.h
@@ -48,19 +48,23 @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
i32, i32, i32, i32)
DEF_HELPER_2(exception_internal, void, env, i32)
DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
+DEF_HELPER_1(setend, void, env)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_1(wfe, void, env)
DEF_HELPER_1(yield, void, env)
DEF_HELPER_1(pre_hvc, void, env)
DEF_HELPER_2(pre_smc, void, env, i32)
+DEF_HELPER_1(check_breakpoints, void, env)
+
DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
DEF_HELPER_1(cpsr_read, i32, env)
DEF_HELPER_3(v7m_msr, void, env, i32, i32)
DEF_HELPER_2(v7m_mrs, i32, env, i32)
-DEF_HELPER_3(access_check_cp_reg, void, env, ptr, i32)
+DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
DEF_HELPER_2(get_cp_reg, i32, env, ptr)
DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
@@ -73,6 +77,9 @@ DEF_HELPER_1(exception_return, void, env)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
+DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
+
DEF_HELPER_2(get_user_reg, i32, env, i32)
DEF_HELPER_3(set_user_reg, void, env, i32, i32)
diff --git a/qemu/target-arm/internals.h b/qemu/target-arm/internals.h
index 924aff9d0..2e70272be 100644
--- a/qemu/target-arm/internals.h
+++ b/qemu/target-arm/internals.h
@@ -25,6 +25,16 @@
#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H
+/* register banks for CPU modes */
+#define BANK_USRSYS 0
+#define BANK_SVC 1
+#define BANK_ABT 2
+#define BANK_UND 3
+#define BANK_IRQ 4
+#define BANK_FIQ 5
+#define BANK_HYP 6
+#define BANK_MON 7
+
static inline bool excp_is_internal(int excp)
{
/* Return true if this exception number represents a QEMU-internal
@@ -36,6 +46,7 @@ static inline bool excp_is_internal(int excp)
|| excp == EXCP_HALTED
|| excp == EXCP_EXCEPTION_EXIT
|| excp == EXCP_KERNEL_TRAP
+ || excp == EXCP_SEMIHOST
|| excp == EXCP_STREX;
}
@@ -58,6 +69,7 @@ static const char * const excnames[] = {
[EXCP_SMC] = "Secure Monitor Call",
[EXCP_VIRQ] = "Virtual IRQ",
[EXCP_VFIQ] = "Virtual FIQ",
+ [EXCP_SEMIHOST] = "Semihosting call",
};
static inline void arm_log_exception(int idx)
@@ -89,15 +101,39 @@ static inline void arm_log_exception(int idx)
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
static const unsigned int map[4] = {
- [1] = 1, /* EL1. */
- [2] = 6, /* EL2. */
- [3] = 7, /* EL3. */
+ [1] = BANK_SVC, /* EL1. */
+ [2] = BANK_HYP, /* EL2. */
+ [3] = BANK_MON, /* EL3. */
};
assert(el >= 1 && el <= 3);
return map[el];
}
-int bank_number(int mode);
+/* Map CPU modes onto saved register banks. */
+static inline int bank_number(int mode)
+{
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_SYS:
+ return BANK_USRSYS;
+ case ARM_CPU_MODE_SVC:
+ return BANK_SVC;
+ case ARM_CPU_MODE_ABT:
+ return BANK_ABT;
+ case ARM_CPU_MODE_UND:
+ return BANK_UND;
+ case ARM_CPU_MODE_IRQ:
+ return BANK_IRQ;
+ case ARM_CPU_MODE_FIQ:
+ return BANK_FIQ;
+ case ARM_CPU_MODE_HYP:
+ return BANK_HYP;
+ case ARM_CPU_MODE_MON:
+ return BANK_MON;
+ }
+ g_assert_not_reached();
+}
+
void switch_mode(CPUARMState *, int);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
@@ -150,6 +186,31 @@ static inline void update_spsel(CPUARMState *env, uint32_t imm)
aarch64_restore_sp(env, cur_el);
}
+/*
+ * arm_pamax
+ * @cpu: ARMCPU
+ *
+ * Returns the implementation defined bit-width of physical addresses.
+ * The ARMv8 reference manuals refer to this as PAMax().
+ */
+static inline unsigned int arm_pamax(ARMCPU *cpu)
+{
+ static const unsigned int pamax_map[] = {
+ [0] = 32,
+ [1] = 36,
+ [2] = 40,
+ [3] = 42,
+ [4] = 44,
+ [5] = 48,
+ };
+ unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);
+
+ /* id_aa64mmfr0 is a read-only register so values outside of the
+ * supported mappings can be considered an implementation error. */
+ assert(parange < ARRAY_SIZE(pamax_map));
+ return pamax_map[parange];
+}
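For example, a PARange field of 2 in ID_AA64MMFR0 reports a 40-bit physical address space (1TB), while the maximum encoding of 5 reports the full 48 bits.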
+
/* Return true if extended addresses are enabled.
* This is always the case if our translation regime is 64 bit,
* but depends on TTBCR.EAE for 32 bit.
@@ -233,10 +294,10 @@ static inline uint32_t syn_aa64_smc(uint32_t imm16)
return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
-static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_thumb)
+static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
{
return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
- | (is_thumb ? 0 : ARM_EL_IL);
+ | (is_16bit ? 0 : ARM_EL_IL);
}
static inline uint32_t syn_aa32_hvc(uint32_t imm16)
@@ -254,10 +315,10 @@ static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
}
-static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_thumb)
+static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
{
return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
- | (is_thumb ? 0 : ARM_EL_IL);
+ | (is_16bit ? 0 : ARM_EL_IL);
}
static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
@@ -271,48 +332,48 @@ static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
int crn, int crm, int rt, int isread,
- bool is_thumb)
+ bool is_16bit)
{
return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
- | (is_thumb ? 0 : ARM_EL_IL)
+ | (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
| (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
int crn, int crm, int rt, int isread,
- bool is_thumb)
+ bool is_16bit)
{
return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
- | (is_thumb ? 0 : ARM_EL_IL)
+ | (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
| (crn << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
int rt, int rt2, int isread,
- bool is_thumb)
+ bool is_16bit)
{
return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
- | (is_thumb ? 0 : ARM_EL_IL)
+ | (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc1 << 16)
| (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
int rt, int rt2, int isread,
- bool is_thumb)
+ bool is_16bit)
{
return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
- | (is_thumb ? 0 : ARM_EL_IL)
+ | (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20) | (opc1 << 16)
| (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
}
-static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_thumb)
+static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
{
return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
- | (is_thumb ? 0 : ARM_EL_IL)
+ | (is_16bit ? 0 : ARM_EL_IL)
| (cv << 24) | (cond << 20);
}
@@ -372,6 +433,9 @@ void hw_breakpoint_update(ARMCPU *cpu, int n);
*/
void hw_breakpoint_update_all(ARMCPU *cpu);
+/* Callback function for checking if a watchpoint should trigger. */
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
+
/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);
@@ -387,8 +451,29 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
void arm_handle_psci_call(ARMCPU *cpu);
#endif
+/**
+ * ARMMMUFaultInfo: Information describing an ARM MMU Fault
+ * @s2addr: Address that caused a fault at stage 2
+ * @stage2: True if we faulted at stage 2
+ * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
+ */
+typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
+struct ARMMMUFaultInfo {
+ target_ulong s2addr;
+ bool stage2;
+ bool s1ptw;
+};
+
/* Do a page table walk and add page to TLB if possible */
bool arm_tlb_fill(CPUState *cpu, vaddr address, int rw, int mmu_idx,
- uint32_t *fsr);
+ uint32_t *fsr, ARMMMUFaultInfo *fi);
+
+/* Return true if the stage 1 translation regime is using LPAE format page
+ * tables */
+bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
+ int is_user, uintptr_t retaddr);
#endif
diff --git a/qemu/target-arm/iwmmxt_helper.c b/qemu/target-arm/iwmmxt_helper.c
index a5069144d..7d87e1a0a 100644
--- a/qemu/target-arm/iwmmxt_helper.c
+++ b/qemu/target-arm/iwmmxt_helper.c
@@ -19,8 +19,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <stdlib.h>
-#include <stdio.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
diff --git a/qemu/target-arm/kvm-consts.h b/qemu/target-arm/kvm-consts.h
index 943bf8980..a2c951859 100644
--- a/qemu/target-arm/kvm-consts.h
+++ b/qemu/target-arm/kvm-consts.h
@@ -15,7 +15,6 @@
#define ARM_KVM_CONSTS_H
#ifdef CONFIG_KVM
-#include "qemu/compiler.h"
#include <linux/kvm.h>
#include <linux/psci.h>
diff --git a/qemu/target-arm/kvm-stub.c b/qemu/target-arm/kvm-stub.c
index db2edc2c4..38bf43387 100644
--- a/qemu/target-arm/kvm-stub.c
+++ b/qemu/target-arm/kvm-stub.c
@@ -9,6 +9,7 @@
* See the COPYING file in the top-level directory.
*
*/
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "kvm_arm.h"
diff --git a/qemu/target-arm/kvm.c b/qemu/target-arm/kvm.c
index b27854208..36710320f 100644
--- a/qemu/target-arm/kvm.c
+++ b/qemu/target-arm/kvm.c
@@ -8,8 +8,7 @@
*
*/
-#include <stdio.h>
-#include <sys/types.h>
+#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -17,6 +16,7 @@
#include "qemu-common.h"
#include "qemu/timer.h"
+#include "qemu/error-report.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
@@ -24,6 +24,7 @@
#include "internals.h"
#include "hw/arm/arm.h"
#include "exec/memattrs.h"
+#include "hw/boards.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
@@ -61,13 +62,18 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
goto err;
}
+ if (!init) {
+ /* Caller doesn't want the VCPU to be initialized, so skip it */
+ goto finish;
+ }
+
ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
if (ret >= 0) {
ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
if (ret < 0) {
goto err;
}
- } else {
+ } else if (cpus_to_try) {
/* Old kernel which doesn't know about the
* PREFERRED_TARGET ioctl: we know it will only support
* creating one kind of guest CPU which is its preferred
@@ -84,8 +90,15 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
if (ret < 0) {
goto err;
}
+ } else {
+ /* Treat a NULL cpus_to_try argument the same as an empty
+ * list, which means we will fail the call since this must
+ * be an old kernel which doesn't support PREFERRED_TARGET.
+ */
+ goto err;
}
+finish:
fdarray[0] = kvmfd;
fdarray[1] = vmfd;
fdarray[2] = cpufd;
@@ -516,9 +529,23 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
return MEMTXATTRS_UNSPECIFIED;
}
+
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
- return 0;
+ int ret = 0;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_DEBUG:
+ if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
+ ret = EXCP_DEBUG;
+ } /* otherwise return to guest */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
+ __func__, run->exit_reason);
+ break;
+ }
+ return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
@@ -541,66 +568,56 @@ int kvm_arch_on_sigbus(int code, void *addr)
return 1;
}
+/* The #ifdef protections are in place until the 32-bit headers are imported
+ * and can be removed once both 32-bit and 64-bit reach feature parity.
+ */
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
- qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
-}
-
-int kvm_arch_insert_sw_breakpoint(CPUState *cs,
- struct kvm_sw_breakpoint *bp)
-{
- qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
- return -EINVAL;
-}
-
-int kvm_arch_insert_hw_breakpoint(target_ulong addr,
- target_ulong len, int type)
-{
- qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
- return -EINVAL;
-}
-
-int kvm_arch_remove_hw_breakpoint(target_ulong addr,
- target_ulong len, int type)
-{
- qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
- return -EINVAL;
-}
-
-int kvm_arch_remove_sw_breakpoint(CPUState *cs,
- struct kvm_sw_breakpoint *bp)
-{
- qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
- return -EINVAL;
-}
-
-void kvm_arch_remove_all_hw_breakpoints(void)
-{
- qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+#ifdef KVM_GUESTDBG_USE_SW_BP
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+#endif
+#ifdef KVM_GUESTDBG_USE_HW
+ if (kvm_arm_hw_debug_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
+ kvm_arm_copy_hw_debug_data(&dbg->arch);
+ }
+#endif
}
void kvm_arch_init_irq_routing(KVMState *s)
{
}
-int kvm_arch_irqchip_create(KVMState *s)
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
- int ret;
+ if (machine_kernel_irqchip_split(ms)) {
+ perror("-machine kernel_irqchip=split is not supported on ARM.");
+ exit(1);
+ }
/* If we can create the VGIC using the newer device control API, we
* let the device do this when it initializes itself, otherwise we
* fall back to the old API */
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
+}
- ret = kvm_create_device(s, KVM_DEV_TYPE_ARM_VGIC_V2, true);
- if (ret == 0) {
- return 1;
+int kvm_arm_vgic_probe(void)
+{
+ if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
+ return 3;
+ } else if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
+ return 2;
+ } else {
+ return 0;
}
-
- return 0;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
- uint64_t address, uint32_t data)
+ uint64_t address, uint32_t data, PCIDevice *dev)
{
return 0;
}
diff --git a/qemu/target-arm/kvm32.c b/qemu/target-arm/kvm32.c
index 421ce0ea0..d44a7f92b 100644
--- a/qemu/target-arm/kvm32.c
+++ b/qemu/target-arm/kvm32.c
@@ -8,8 +8,7 @@
*
*/
-#include <stdio.h>
-#include <sys/types.h>
+#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -181,7 +180,6 @@ int kvm_arm_cpreg_level(uint64_t regidx)
return KVM_PUT_RUNTIME_STATE;
}
-#define ARM_MPIDR_HWID_BITMASK 0xFFFFFF
#define ARM_CPU_ID_MPIDR 0, 0, 0, 5
int kvm_arch_init_vcpu(CPUState *cs)
@@ -234,7 +232,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (ret) {
return ret;
}
- cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;
+ cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;
return kvm_arm_init_cpreg_list(cpu);
}
@@ -281,30 +279,30 @@ static const Reg regs[] = {
COREREG(usr_regs.uregs[10], usr_regs[2]),
COREREG(usr_regs.uregs[11], usr_regs[3]),
COREREG(usr_regs.uregs[12], usr_regs[4]),
- COREREG(usr_regs.uregs[13], banked_r13[0]),
- COREREG(usr_regs.uregs[14], banked_r14[0]),
+ COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
+ COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
/* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
- COREREG(svc_regs[0], banked_r13[1]),
- COREREG(svc_regs[1], banked_r14[1]),
- COREREG64(svc_regs[2], banked_spsr[1]),
- COREREG(abt_regs[0], banked_r13[2]),
- COREREG(abt_regs[1], banked_r14[2]),
- COREREG64(abt_regs[2], banked_spsr[2]),
- COREREG(und_regs[0], banked_r13[3]),
- COREREG(und_regs[1], banked_r14[3]),
- COREREG64(und_regs[2], banked_spsr[3]),
- COREREG(irq_regs[0], banked_r13[4]),
- COREREG(irq_regs[1], banked_r14[4]),
- COREREG64(irq_regs[2], banked_spsr[4]),
+ COREREG(svc_regs[0], banked_r13[BANK_SVC]),
+ COREREG(svc_regs[1], banked_r14[BANK_SVC]),
+ COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
+ COREREG(abt_regs[0], banked_r13[BANK_ABT]),
+ COREREG(abt_regs[1], banked_r14[BANK_ABT]),
+ COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
+ COREREG(und_regs[0], banked_r13[BANK_UND]),
+ COREREG(und_regs[1], banked_r14[BANK_UND]),
+ COREREG64(und_regs[2], banked_spsr[BANK_UND]),
+ COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
+ COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
+ COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
/* R8_fiq .. R14_fiq and SPSR_fiq */
COREREG(fiq_regs[0], fiq_regs[0]),
COREREG(fiq_regs[1], fiq_regs[1]),
COREREG(fiq_regs[2], fiq_regs[2]),
COREREG(fiq_regs[3], fiq_regs[3]),
COREREG(fiq_regs[4], fiq_regs[4]),
- COREREG(fiq_regs[5], banked_r13[5]),
- COREREG(fiq_regs[6], banked_r14[5]),
- COREREG64(fiq_regs[7], banked_spsr[5]),
+ COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
+ COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
+ COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
/* R15 */
COREREG(usr_regs.uregs[15], regs[15]),
/* VFP system registers */
@@ -430,7 +428,7 @@ int kvm_arch_get_registers(CPUState *cs)
if (ret) {
return ret;
}
- cpsr_write(env, cpsr, 0xffffffff);
+ cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);
/* Make sure the current mode regs are properly set */
mode = env->uncached_cpsr & CPSR_M;
@@ -476,3 +474,50 @@ int kvm_arch_get_registers(CPUState *cs)
return 0;
}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
+ return -EINVAL;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
+ return -EINVAL;
+}
+
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
+ return false;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+ return -EINVAL;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+ return -EINVAL;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+}
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+}
+
+bool kvm_arm_hw_debug_active(CPUState *cs)
+{
+ return false;
+}
diff --git a/qemu/target-arm/kvm64.c b/qemu/target-arm/kvm64.c
index bd60889d1..e8527bf0c 100644
--- a/qemu/target-arm/kvm64.c
+++ b/qemu/target-arm/kvm64.c
@@ -2,22 +2,26 @@
* ARM implementation of KVM hooks, 64 bit specific code
*
* Copyright Mian-M. Hamayun 2013, Virtual Open Systems
+ * Copyright Alex Bennée 2014, Linaro
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
-#include <stdio.h>
-#include <sys/types.h>
+#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <sys/ptrace.h>
+#include <linux/elf.h>
#include <linux/kvm.h>
-#include "config-host.h"
#include "qemu-common.h"
#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "qemu/host-utils.h"
+#include "exec/gdbstub.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
@@ -25,6 +29,360 @@
#include "internals.h"
#include "hw/arm/arm.h"
+static bool have_guest_debug;
+
+/*
+ * Although the ARM implementation of hardware assisted debugging
+ * allows for different breakpoints per-core, the current GDB
+ * interface treats them as a global pool of registers (which seems to
+ * be the case for x86, ppc and s390). As a result we store one copy
+ * of registers which is used for all active cores.
+ *
+ * Write access is serialised by virtue of the GDB protocol which
+ * updates things. Read access (i.e. when the values are copied to the
+ * vCPU) is also gated by GDB's run control.
+ *
+ * This is not unreasonable: when debugging kernels you rarely know
+ * which core will eventually execute your function.
+ */
+
+typedef struct {
+ uint64_t bcr;
+ uint64_t bvr;
+} HWBreakpoint;
+
+/* The watchpoint registers can cover more area than the requested
+ * watchpoint so we need to store the additional information
+ * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
+ * when the watchpoint is hit.
+ */
+typedef struct {
+ uint64_t wcr;
+ uint64_t wvr;
+ CPUWatchpoint details;
+} HWWatchpoint;
+
+/* Maximum and current break/watch point counts */
+int max_hw_bps, max_hw_wps;
+GArray *hw_breakpoints, *hw_watchpoints;
+
+#define cur_hw_wps (hw_watchpoints->len)
+#define cur_hw_bps (hw_breakpoints->len)
+#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
+#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
+
+/**
+ * kvm_arm_init_debug() - check for guest debug capabilities
+ * @cs: CPUState
+ *
+ * kvm_check_extension returns the number of debug registers we have
+ * or 0 if we have none.
+ *
+ */
+static void kvm_arm_init_debug(CPUState *cs)
+{
+ have_guest_debug = kvm_check_extension(cs->kvm_state,
+ KVM_CAP_SET_GUEST_DEBUG);
+
+ max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
+ hw_watchpoints = g_array_sized_new(true, true,
+ sizeof(HWWatchpoint), max_hw_wps);
+
+ max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
+ hw_breakpoints = g_array_sized_new(true, true,
+ sizeof(HWBreakpoint), max_hw_bps);
+ return;
+}
+
+/**
+ * insert_hw_breakpoint()
+ * @addr: address of breakpoint
+ *
+ * See ARM ARM D2.9.1 for details but here we are only going to create
+ * simple un-linked breakpoints (i.e. we don't chain breakpoints
+ * together to match address and context or vmid). The hardware is
+ * capable of fancier matching but that will require exposing that
+ * fanciness to GDB's interface.
+ *
+ * D7.3.2 DBGBCR<n>_EL1, Debug Breakpoint Control Registers
+ *
+ * 31 24 23 20 19 16 15 14 13 12 9 8 5 4 3 2 1 0
+ * +------+------+-------+-----+----+------+-----+------+-----+---+
+ * | RES0 | BT | LBN | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
+ * +------+------+-------+-----+----+------+-----+------+-----+---+
+ *
+ * BT: Breakpoint type (0 = unlinked address match)
+ * LBN: Linked BP number (0 = unused)
+ * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
+ * BAS: Byte Address Select (RES1 for AArch64)
+ * E: Enable bit
+ */
+static int insert_hw_breakpoint(target_ulong addr)
+{
+ HWBreakpoint brk = {
+ .bcr = 0x1, /* BCR E=1, enable */
+ .bvr = addr
+ };
+
+ if (cur_hw_bps >= max_hw_bps) {
+ return -ENOBUFS;
+ }
+
+ brk.bcr = deposit32(brk.bcr, 1, 2, 0x3); /* PMC = 11 */
+ brk.bcr = deposit32(brk.bcr, 5, 4, 0xf); /* BAS = RES1 */
+
+ g_array_append_val(hw_breakpoints, brk);
+
+ return 0;
+}
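As a sanity check of the BCR value built above, a standalone sketch with deposit32() re-implemented locally (QEMU's own helper lives in include/qemu/bitops.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Local stand-in for QEMU's deposit32(): insert 'field' into
     * bits [start, start+len) of 'v'. */
    static uint32_t deposit32(uint32_t v, int start, int len, uint32_t field)
    {
        uint32_t mask = ((1u << len) - 1) << start;
        return (v & ~mask) | ((field << start) & mask);
    }

    int main(void)
    {
        uint32_t bcr = 0x1;              /* E = 1, enable */
        bcr = deposit32(bcr, 1, 2, 0x3); /* PMC = 0b11 */
        bcr = deposit32(bcr, 5, 4, 0xf); /* BAS = RES1 */
        printf("BCR = 0x%x\n", bcr);     /* prints BCR = 0x1e7 */
        return 0;
    }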
+
+/**
+ * delete_hw_breakpoint()
+ * @pc: address of breakpoint
+ *
+ * Delete a breakpoint and shuffle any above down
+ */
+
+static int delete_hw_breakpoint(target_ulong pc)
+{
+ int i;
+ for (i = 0; i < hw_breakpoints->len; i++) {
+ HWBreakpoint *brk = get_hw_bp(i);
+ if (brk->bvr == pc) {
+ g_array_remove_index(hw_breakpoints, i);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * insert_hw_watchpoint()
+ * @addr: address of watch point
+ * @len: size of area
+ * @type: type of watch point
+ *
+ * See ARM ARM D2.10. As with the breakpoints we can do some advanced
+ * stuff if we want to. The watch points can be linked with the break
+ * points above to make them context aware. However for simplicity
+ * currently we only deal with simple read/write watch points.
+ *
+ * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
+ *
+ * 31 29 28 24 23 21 20 19 16 15 14 13 12 5 4 3 2 1 0
+ * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
+ * | RES0 | MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
+ * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
+ *
+ * MASK: num bits addr mask (0=none, 01/10=reserved, 11=3 bits (8 bytes))
+ * WT: 0 - unlinked, 1 - linked (not currently used)
+ * LBN: Linked BP number (not currently used)
+ * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
+ * BAS: Byte Address Select
+ * LSC: Load/Store control (01: load, 10: store, 11: both)
+ * E: Enable
+ *
+ * The bottom 2 bits of the value register are masked. Therefore to
+ * break on any sizes smaller than an unaligned word you need to set
+ * MASK=0, BAS=bit per byte in question. For larger power-of-2 regions you
+ * need to ensure you mask the address as required and set BAS=0xff
+ */
+
+static int insert_hw_watchpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ HWWatchpoint wp = {
+ .wcr = 1, /* E=1, enable */
+ .wvr = addr & (~0x7ULL),
+ .details = { .vaddr = addr, .len = len }
+ };
+
+ if (cur_hw_wps >= max_hw_wps) {
+ return -ENOBUFS;
+ }
+
+ /*
+ * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
+ * valid whether EL3 is implemented or not
+ */
+ wp.wcr = deposit32(wp.wcr, 1, 2, 3);
+
+ switch (type) {
+ case GDB_WATCHPOINT_READ:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 1);
+ wp.details.flags = BP_MEM_READ;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 2);
+ wp.details.flags = BP_MEM_WRITE;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 3);
+ wp.details.flags = BP_MEM_ACCESS;
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+ if (len <= 8) {
+ /* we align the address and set the bits in BAS */
+ int off = addr & 0x7;
+ int bas = (1 << len) - 1;
+
+ wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
+ } else {
+ /* For ranges above 8 bytes we need to be a power of 2 */
+ if (is_power_of_2(len)) {
+ int bits = ctz64(len);
+
+ wp.wvr &= ~((1 << bits) - 1);
+ wp.wcr = deposit32(wp.wcr, 24, 4, bits);
+ wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
+ } else {
+ return -ENOBUFS;
+ }
+ }
+
+ g_array_append_val(hw_watchpoints, wp);
+ return 0;
+}
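Tracing the two sizing paths with assumed values: watching 2 bytes at address 0x1003 gives wvr = 0x1000, off = 3 and bas = 0b11, deposited at bit 5 + 3 so that WCR bits [9:8] select bytes 0x1003-0x1004 of the doubleword; a 16-byte watch instead takes the power-of-2 path with bits = ctz64(16) = 4, clearing the low four address bits and programming MASK = 4 with BAS = 0xff.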
+
+
+static bool check_watchpoint_in_range(int i, target_ulong addr)
+{
+ HWWatchpoint *wp = get_hw_wp(i);
+ uint64_t addr_top, addr_bottom = wp->wvr;
+ int bas = extract32(wp->wcr, 5, 8);
+ int mask = extract32(wp->wcr, 24, 4);
+
+ if (mask) {
+ addr_top = addr_bottom + (1 << mask);
+ } else {
+ /* BAS must be contiguous but can be offset against the base
+ * address in DBGWVR */
+ addr_bottom = addr_bottom + ctz32(bas);
+ addr_top = addr_bottom + clo32(bas);
+ }
+
+ if (addr >= addr_bottom && addr <= addr_top) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * delete_hw_watchpoint()
+ * @addr: address of watchpoint
+ *
+ * Delete a watchpoint and shuffle any above down
+ */
+
+static int delete_hw_watchpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int i;
+ for (i = 0; i < cur_hw_wps; i++) {
+ if (check_watchpoint_in_range(i, addr)) {
+ g_array_remove_index(hw_watchpoints, i);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return insert_hw_breakpoint(addr);
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return insert_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return delete_hw_breakpoint(addr);
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return delete_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ if (cur_hw_wps > 0) {
+ g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
+ }
+ if (cur_hw_bps > 0) {
+ g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
+ }
+}
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
+{
+ int i;
+ memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
+
+ for (i = 0; i < max_hw_wps; i++) {
+ HWWatchpoint *wp = get_hw_wp(i);
+ ptr->dbg_wcr[i] = wp->wcr;
+ ptr->dbg_wvr[i] = wp->wvr;
+ }
+ for (i = 0; i < max_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ ptr->dbg_bcr[i] = bp->bcr;
+ ptr->dbg_bvr[i] = bp->bvr;
+ }
+}
+
+bool kvm_arm_hw_debug_active(CPUState *cs)
+{
+ return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
+}
+
+static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
+{
+ int i;
+
+ for (i = 0; i < cur_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ if (bp->bvr == pc) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
+{
+ int i;
+
+ for (i = 0; i < cur_hw_wps; i++) {
+ if (check_watchpoint_in_range(i, addr)) {
+ return &get_hw_wp(i)->details;
+ }
+ }
+ return NULL;
+}
+
+
static inline void set_feature(uint64_t *features, int feature)
{
*features |= 1ULL << feature;
@@ -77,7 +435,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
return true;
}
-#define ARM_MPIDR_HWID_BITMASK 0xFF00FFFFFFULL
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
int kvm_arch_init_vcpu(CPUState *cs)
@@ -120,7 +477,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (ret) {
return ret;
}
- cpu->mp_affinity = mpidr & ARM_MPIDR_HWID_BITMASK;
+ cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
+
+ kvm_arm_init_debug(cs);
return kvm_arm_init_cpreg_list(cpu);
}
@@ -363,8 +722,7 @@ int kvm_arch_get_registers(CPUState *cs)
if (is_a64(env)) {
pstate_write(env, val);
} else {
- env->uncached_cpsr = val & CPSR_M;
- cpsr_write(env, val, 0xffffffff);
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
}
/* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
@@ -464,3 +822,105 @@ int kvm_arch_get_registers(CPUState *cs)
/* TODO: other registers */
return ret;
}
+
+/* C6.6.29 BRK instruction */
+static const uint32_t brk_insn = 0xd4200000;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ if (have_guest_debug) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ error_report("guest debug not supported on this kernel");
+ return -EINVAL;
+ }
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static uint32_t brk;
+
+ if (have_guest_debug) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
+ brk != brk_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ error_report("guest debug not supported on this kernel");
+ return -EINVAL;
+ }
+}
+
+/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
+ *
+ * To minimise translating between kernel and user-space the kernel
+ * ABI just provides user-space with the full exception syndrome
+ * register value to be decoded in QEMU.
+ */
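A minimal sketch of the decode step, assuming the architectural layout (the exception class lives in ESR_ELx[31:26], which is what ARM_EL_EC_SHIFT encodes):

    #include <stdint.h>

    /* Illustration: extract the exception class from a raw syndrome.
     * 0xf2000000 is the ESR for a BRK #0 (EC=0x3c, IL=1). */
    static unsigned esr_ec(uint32_t esr)
    {
        return esr >> 26;  /* esr_ec(0xf2000000) == 0x3c, EC_AA64_BKPT */
    }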
+
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
+{
+ int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUARMState *env = &cpu->env;
+
+ /* Ensure PC is synchronised */
+ kvm_cpu_synchronize_state(cs);
+
+ switch (hsr_ec) {
+ case EC_SOFTWARESTEP:
+ if (cs->singlestep_enabled) {
+ return true;
+ } else {
+ /*
+ * The kernel should have suppressed the guest's ability to
+ * single step at this point so something has gone wrong.
+ */
+ error_report("%s: guest single-step while debugging unsupported"
+ " (%"PRIx64", %"PRIx32")\n",
+ __func__, env->pc, debug_exit->hsr);
+ return false;
+ }
+ break;
+ case EC_AA64_BKPT:
+ if (kvm_find_sw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_BREAKPOINT:
+ if (find_hw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_WATCHPOINT:
+ {
+ CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
+ if (wp) {
+ cs->watchpoint_hit = wp;
+ return true;
+ }
+ break;
+ }
+ default:
+ error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")\n",
+ __func__, debug_exit->hsr, env->pc);
+ }
+
+ /* If we are not handling the debug exception it must belong to
+ * the guest. Let's re-use the existing TCG interrupt code to set
+ * everything up properly.
+ */
+ cs->exception_index = EXCP_BKPT;
+ env->exception.syndrome = debug_exit->hsr;
+ env->exception.vaddress = debug_exit->far;
+ cc->do_interrupt(cs);
+
+ return false;
+}
diff --git a/qemu/target-arm/kvm_arm.h b/qemu/target-arm/kvm_arm.h
index 7912d7433..345233c18 100644
--- a/qemu/target-arm/kvm_arm.h
+++ b/qemu/target-arm/kvm_arm.h
@@ -124,9 +124,12 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu);
* kvm_arm_create_scratch_host_vcpu:
* @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
* QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
- * know the PREFERRED_TARGET ioctl
+ * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
+ * an empty array.
* @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
- * @init: filled in with the necessary values for creating a host vcpu
+ * @init: filled in with the necessary values for creating a host
+ * vcpu. If NULL is provided, will not init the vCPU (though the cpufd
+ * will still be set up).
*
* Create a scratch vcpu in its own VM of the type preferred by the host
* kernel (as would be used for '-cpu host'), for purposes of probing it
@@ -189,6 +192,60 @@ int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu);
*/
int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
+int kvm_arm_vgic_probe(void);
+
+#else
+
+static inline int kvm_arm_vgic_probe(void)
+{
+ return 0;
+}
+
#endif
+static inline const char *gic_class_name(void)
+{
+ return kvm_irqchip_in_kernel() ? "kvm-arm-gic" : "arm_gic";
+}
+
+/**
+ * gicv3_class_name
+ *
+ * Return name of GICv3 class to use depending on whether KVM acceleration is
+ * in use. Exits with an error if the chosen implementation is not available.
+ *
+ * Returns: class name to use
+ */
+const char *gicv3_class_name(void);
+
+/**
+ * kvm_arm_handle_debug:
+ * @cs: CPUState
+ * @debug_exit: debug part of the KVM exit structure
+ *
+ * Returns: TRUE if the debug exception was handled.
+ */
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit);
+
+/**
+ * kvm_arm_hw_debug_active:
+ * @cs: CPU State
+ *
+ * Return: TRUE if any hardware breakpoints or watchpoints are in use.
+ */
+
+bool kvm_arm_hw_debug_active(CPUState *cs);
+
+/**
+ * kvm_arm_copy_hw_debug_data:
+ *
+ * @ptr: kvm_guest_debug_arch structure
+ *
+ * Copy the architecture specific debug registers into the
+ * kvm_guest_debug ioctl structure.
+ */
+struct kvm_guest_debug_arch;
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);
+
#endif
diff --git a/qemu/target-arm/machine.c b/qemu/target-arm/machine.c
index 32adfe792..03a73d950 100644
--- a/qemu/target-arm/machine.c
+++ b/qemu/target-arm/machine.c
@@ -1,5 +1,7 @@
+#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/boards.h"
+#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
@@ -171,9 +173,7 @@ static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
return 0;
}
- /* Avoid mode switch when restoring CPSR */
- env->uncached_cpsr = val & CPSR_M;
- cpsr_write(env, val, 0xffffffff);
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
return 0;
}
@@ -328,3 +328,20 @@ const VMStateDescription vmstate_arm_cpu = {
NULL
}
};
+
+const char *gicv3_class_name(void)
+{
+ if (kvm_irqchip_in_kernel()) {
+#ifdef TARGET_AARCH64
+ return "kvm-arm-gicv3";
+#else
+ error_report("KVM GICv3 acceleration is not supported on this "
+ "platform");
+#endif
+ } else {
+ /* TODO: Software emulation is not implemented yet */
+ error_report("KVM is currently required for GICv3 emulation");
+ }
+
+ exit(1);
+}
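Board code is then expected to instantiate the device by class name; a hedged sketch of the pattern (qdev_create() and the "num-cpu" property as used by the virt board; details may differ per board):

    /* Sketch only: create whichever GICv3 implementation is usable. */
    DeviceState *gicdev = qdev_create(NULL, gicv3_class_name());
    qdev_prop_set_uint32(gicdev, "num-cpu", smp_cpus);
    qdev_init_nofail(gicdev);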
diff --git a/qemu/target-arm/monitor.c b/qemu/target-arm/monitor.c
new file mode 100644
index 000000000..1ee59a2e4
--- /dev/null
+++ b/qemu/target-arm/monitor.c
@@ -0,0 +1,84 @@
+/*
+ * QEMU monitor.c for ARM.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "qmp-commands.h"
+#include "hw/boards.h"
+#include "kvm_arm.h"
+
+static GICCapability *gic_cap_new(int version)
+{
+ GICCapability *cap = g_new0(GICCapability, 1);
+ cap->version = version;
+ /* by default, support none */
+ cap->emulated = false;
+ cap->kernel = false;
+ return cap;
+}
+
+static GICCapabilityList *gic_cap_list_add(GICCapabilityList *head,
+ GICCapability *cap)
+{
+ GICCapabilityList *item = g_new0(GICCapabilityList, 1);
+ item->value = cap;
+ item->next = head;
+ return item;
+}
+
+static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3)
+{
+#ifdef CONFIG_KVM
+ int fdarray[3];
+
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) {
+ return;
+ }
+
+ /* Test KVM GICv2 */
+ if (kvm_device_supported(fdarray[1], KVM_DEV_TYPE_ARM_VGIC_V2)) {
+ v2->kernel = true;
+ }
+
+ /* Test KVM GICv3 */
+ if (kvm_device_supported(fdarray[1], KVM_DEV_TYPE_ARM_VGIC_V3)) {
+ v3->kernel = true;
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+#endif
+}
+
+GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
+{
+ GICCapabilityList *head = NULL;
+ GICCapability *v2 = gic_cap_new(2), *v3 = gic_cap_new(3);
+
+ v2->emulated = true;
+ /* TODO: change this to true once emulated GICv3 is available. */
+ v3->emulated = false;
+
+ gic_cap_kvm_probe(v2, v3);
+
+ head = gic_cap_list_add(head, v2);
+ head = gic_cap_list_add(head, v3);
+
+ return head;
+}
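A hedged sketch of consuming the result (GICCapabilityList and qapi_free_GICCapabilityList() are QAPI-generated types, as suggested by the list-building code above):

    static void dump_gic_caps(void)
    {
        GICCapabilityList *caps = qmp_query_gic_capabilities(NULL);
        GICCapabilityList *it;

        for (it = caps; it; it = it->next) {
            printf("GICv%" PRId64 ": emulated=%d kernel=%d\n",
                   it->value->version, (int)it->value->emulated,
                   (int)it->value->kernel);
        }
        qapi_free_GICCapabilityList(caps);
    }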
diff --git a/qemu/target-arm/neon_helper.c b/qemu/target-arm/neon_helper.c
index 47d13e908..1f1844f5b 100644
--- a/qemu/target-arm/neon_helper.c
+++ b/qemu/target-arm/neon_helper.c
@@ -6,8 +6,7 @@
*
* This code is licensed under the GNU GPL v2.
*/
-#include <stdlib.h>
-#include <stdio.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
diff --git a/qemu/target-arm/op_helper.c b/qemu/target-arm/op_helper.c
index 663c05d1d..d626ff1a2 100644
--- a/qemu/target-arm/op_helper.c
+++ b/qemu/target-arm/op_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
@@ -83,19 +84,27 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
{
bool ret;
uint32_t fsr = 0;
+ ARMMMUFaultInfo fi = {};
- ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr);
+ ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr, &fi);
if (unlikely(ret)) {
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
uint32_t syn, exc;
- bool same_el = (arm_current_el(env) != 0);
+ unsigned int target_el;
+ bool same_el;
if (retaddr) {
/* now we have a real cpu fault */
cpu_restore_state(cs, retaddr);
}
+ target_el = exception_target_el(env);
+ if (fi.stage2) {
+ target_el = 2;
+ env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+ }
+ same_el = arm_current_el(env) == target_el;
/* AArch64 syndrome does not have an LPAE bit */
syn = fsr & ~(1 << 9);
@@ -103,10 +112,10 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
* information; this is always true for exceptions reported to EL1.
*/
if (is_write == 2) {
- syn = syn_insn_abort(same_el, 0, 0, syn);
+ syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
exc = EXCP_PREFETCH_ABORT;
} else {
- syn = syn_data_abort(same_el, 0, 0, 0, is_write == 1, syn);
+ syn = syn_data_abort(same_el, 0, 0, fi.s1ptw, is_write == 1, syn);
if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
fsr |= (1 << 11);
}
@@ -115,10 +124,48 @@ void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
env->exception.vaddress = addr;
env->exception.fsr = fsr;
- raise_exception(env, exc, syn, exception_target_el(env));
+ raise_exception(env, exc, syn, target_el);
+ }
+}
+
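For the stage-2 case above, HPFAR_EL2 carries bits [47:12] of the faulting IPA in register bits [39:4]; a quick arithmetic sketch of the extract64(..., 12, 47) << 4 packing (illustration only, with a made-up fault address):

    /* Illustration: an IPA faulting at stage 2. */
    uint64_t s2addr = 0x40001abcULL;
    uint64_t hpfar  = ((s2addr >> 12) & ((1ULL << 47) - 1)) << 4;
    /* hpfar == 0x400010: page number 0x40001 placed at bits [39:4] */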
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
+ int is_user, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int target_el;
+ bool same_el;
+
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
}
+
+ target_el = exception_target_el(env);
+ same_el = (arm_current_el(env) == target_el);
+
+ env->exception.vaddress = vaddr;
+
+ /* the DFSR for an alignment fault depends on whether we're using
+ * the LPAE long descriptor format, or the short descriptor format
+ */
+ if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
+ env->exception.fsr = 0x21;
+ } else {
+ env->exception.fsr = 0x1;
+ }
+
+ if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
+ env->exception.fsr |= (1 << 11);
+ }
+
+ raise_exception(env, EXCP_DATA_ABORT,
+ syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
+ target_el);
}
-#endif
+
+#endif /* !defined(CONFIG_USER_ONLY) */
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
@@ -249,6 +296,11 @@ uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
return res;
}
+void HELPER(setend)(CPUARMState *env)
+{
+ env->uncached_cpsr ^= CPSR_E;
+}
+
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
* The function returns the target EL (1-3) if the instruction is to be trapped;
* otherwise it returns 0 indicating it is not trapped.
@@ -375,7 +427,13 @@ uint32_t HELPER(cpsr_read)(CPUARMState *env)
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
- cpsr_write(env, val, mask);
+ cpsr_write(env, val, mask, CPSRWriteByInstr);
+}
+
+/* Write the CPSR for a 32-bit exception return */
+void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
+{
+ cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
}
/* Access to user mode registers from privileged modes. */
@@ -384,9 +442,9 @@ uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
uint32_t val;
if (regno == 13) {
- val = env->banked_r13[0];
+ val = env->banked_r13[BANK_USRSYS];
} else if (regno == 14) {
- val = env->banked_r14[0];
+ val = env->banked_r14[BANK_USRSYS];
} else if (regno >= 8
&& (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
val = env->usr_regs[regno - 8];
@@ -399,9 +457,9 @@ uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
if (regno == 13) {
- env->banked_r13[0] = val;
+ env->banked_r13[BANK_USRSYS] = val;
} else if (regno == 14) {
- env->banked_r14[0] = val;
+ env->banked_r14[BANK_USRSYS] = val;
} else if (regno >= 8
&& (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
env->usr_regs[regno - 8] = val;
@@ -410,7 +468,154 @@ void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
}
}
-void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
+void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
+{
+ if ((env->uncached_cpsr & CPSR_M) == mode) {
+ env->regs[13] = val;
+ } else {
+ env->banked_r13[bank_number(mode)] = val;
+ }
+}
+
+uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
+{
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
+ /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
+ * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
+ */
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+
+ if ((env->uncached_cpsr & CPSR_M) == mode) {
+ return env->regs[13];
+ } else {
+ return env->banked_r13[bank_number(mode)];
+ }
+}
+
+static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
+ uint32_t regno)
+{
+ /* Raise an exception if the requested access is one of the UNPREDICTABLE
+ * cases; otherwise return. This broadly corresponds to the pseudocode
+ * BankedRegisterAccessValid() and SPSRAccessValid(),
+ * except that we have already handled some cases at translate time.
+ */
+ int curmode = env->uncached_cpsr & CPSR_M;
+
+ if (curmode == tgtmode) {
+ goto undef;
+ }
+
+ if (tgtmode == ARM_CPU_MODE_USR) {
+ switch (regno) {
+ case 8 ... 12:
+ if (curmode != ARM_CPU_MODE_FIQ) {
+ goto undef;
+ }
+ break;
+ case 13:
+ if (curmode == ARM_CPU_MODE_SYS) {
+ goto undef;
+ }
+ break;
+ case 14:
+ if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
+ goto undef;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (tgtmode == ARM_CPU_MODE_HYP) {
+ switch (regno) {
+ case 17: /* ELR_Hyp */
+ if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ break;
+ default:
+ if (curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ break;
+ }
+ }
+
+ return;
+
+undef:
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+}
+
+void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
+ uint32_t regno)
+{
+ msr_mrs_banked_exc_checks(env, tgtmode, regno);
+
+ switch (regno) {
+ case 16: /* SPSRs */
+ env->banked_spsr[bank_number(tgtmode)] = value;
+ break;
+ case 17: /* ELR_Hyp */
+ env->elr_el[2] = value;
+ break;
+ case 13:
+ env->banked_r13[bank_number(tgtmode)] = value;
+ break;
+ case 14:
+ env->banked_r14[bank_number(tgtmode)] = value;
+ break;
+ case 8 ... 12:
+ switch (tgtmode) {
+ case ARM_CPU_MODE_USR:
+ env->usr_regs[regno - 8] = value;
+ break;
+ case ARM_CPU_MODE_FIQ:
+ env->fiq_regs[regno - 8] = value;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
+{
+ msr_mrs_banked_exc_checks(env, tgtmode, regno);
+
+ switch (regno) {
+ case 16: /* SPSRs */
+ return env->banked_spsr[bank_number(tgtmode)];
+ case 17: /* ELR_Hyp */
+ return env->elr_el[2];
+ case 13:
+ return env->banked_r13[bank_number(tgtmode)];
+ case 14:
+ return env->banked_r14[bank_number(tgtmode)];
+ case 8 ... 12:
+ switch (tgtmode) {
+ case ARM_CPU_MODE_USR:
+ return env->usr_regs[regno - 8];
+ case ARM_CPU_MODE_FIQ:
+ return env->fiq_regs[regno - 8];
+ default:
+ g_assert_not_reached();
+ }
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
+ uint32_t isread)
{
const ARMCPRegInfo *ri = rip;
int target_el;
@@ -424,7 +629,7 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
return;
}
- switch (ri->accessfn(env, ri)) {
+ switch (ri->accessfn(env, ri, isread)) {
case CP_ACCESS_OK:
return;
case CP_ACCESS_TRAP:
@@ -444,6 +649,27 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
target_el = exception_target_el(env);
syndrome = syn_uncategorized();
break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
+ target_el = 2;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
+ target_el = 3;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_FP_EL2:
+ target_el = 2;
+ /* Since we are an implementation that takes exceptions on a trapped
+ * conditional insn only if the insn has passed its condition code
+ * check, we take the IMPDEF choice to always report CV=1 COND=0xe
+ * (which is also the required value for AArch64 traps).
+ */
+ syndrome = syn_fp_access_trap(1, 0xe, false);
+ break;
+ case CP_ACCESS_TRAP_FP_EL3:
+ target_el = 3;
+ syndrome = syn_fp_access_trap(1, 0xe, false);
+ break;
default:
g_assert_not_reached();
}
@@ -558,12 +784,14 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
int cur_el = arm_current_el(env);
bool secure = arm_is_secure(env);
bool smd = env->cp15.scr_el3 & SCR_SMD;
- /* On ARMv8 AArch32, SMD only applies to NS state.
- * On ARMv7 SMD only applies to NS state and only if EL2 is available.
- * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
- * the EL2 condition here.
+ /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
+ * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
+ * extensions, SMD only applies to NS state.
+ * On ARMv7 without the Virtualization extensions, the SMD bit
+ * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
+ * so we need not special case this here.
*/
- bool undef = is_a64(env) ? smd : (!secure && smd);
+ bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;
if (arm_is_psci_call(cpu, EXCP_SMC)) {
/* If PSCI is enabled and this looks like a valid PSCI call then
@@ -586,12 +814,51 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
}
}
+static int el_from_spsr(uint32_t spsr)
+{
+ /* Return the exception level that this SPSR is requesting a return to,
+ * or -1 if it is invalid (an illegal return)
+ */
+ if (spsr & PSTATE_nRW) {
+ switch (spsr & CPSR_M) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_FIQ:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_SYS:
+ return 1;
+ case ARM_CPU_MODE_MON:
+ /* Returning to Mon from AArch64 is never possible,
+ * so this is an illegal return.
+ */
+ default:
+ return -1;
+ }
+ } else {
+ if (extract32(spsr, 1, 1)) {
+ /* Return with reserved M[1] bit set */
+ return -1;
+ }
+ if (extract32(spsr, 0, 4) == 1) {
+ /* Return to EL0 with M[0] bit set */
+ return -1;
+ }
+ return extract32(spsr, 2, 2);
+ }
+}
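Two worked values, assuming the usual encodings (PSTATE_nRW is bit 4; CPSR_M is the low five bits); the SPSR values here are illustrative:

    /* Illustrative spot-checks of el_from_spsr(): */
    assert(el_from_spsr(0x3c5) == 1); /* AArch64 EL1h: bit 4 clear, M[3:2]=1 */
    assert(el_from_spsr(0x1da) == 2); /* AArch32 Hyp: bit 4 set, M[4:0]=0x1a */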
+
void HELPER(exception_return)(CPUARMState *env)
{
int cur_el = arm_current_el(env);
unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
uint32_t spsr = env->banked_spsr[spsr_idx];
int new_el;
+ bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
aarch64_save_sp(env, cur_el);
@@ -608,35 +875,51 @@ void HELPER(exception_return)(CPUARMState *env)
spsr &= ~PSTATE_SS;
}
- if (spsr & PSTATE_nRW) {
- /* TODO: We currently assume EL1/2/3 are running in AArch64. */
+ new_el = el_from_spsr(spsr);
+ if (new_el == -1) {
+ goto illegal_return;
+ }
+ if (new_el > cur_el
+ || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
+ /* Disallow return to an EL which is unimplemented or higher
+ * than the current one.
+ */
+ goto illegal_return;
+ }
+
+ if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
+ /* Return to an EL which is configured for a different register width */
+ goto illegal_return;
+ }
+
+ if (new_el == 2 && arm_is_secure_below_el3(env)) {
+ /* Return to the non-existent secure-EL2 */
+ goto illegal_return;
+ }
+
+ if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
+ && !arm_is_secure_below_el3(env)) {
+ goto illegal_return;
+ }
+
+ if (!return_to_aa64) {
env->aarch64 = 0;
- new_el = 0;
- env->uncached_cpsr = 0x10;
- cpsr_write(env, spsr, ~0);
+ /* We do a raw CPSR write because aarch64_sync_64_to_32()
+ * will sort the register banks out for us, and we've already
+ * caught all the bad-mode cases in el_from_spsr().
+ */
+ cpsr_write(env, spsr, ~0, CPSRWriteRaw);
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
}
aarch64_sync_64_to_32(env);
- env->regs[15] = env->elr_el[1] & ~0x1;
- } else {
- new_el = extract32(spsr, 2, 2);
- if (new_el > cur_el
- || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
- /* Disallow return to an EL which is unimplemented or higher
- * than the current one.
- */
- goto illegal_return;
- }
- if (extract32(spsr, 1, 1)) {
- /* Return with reserved M[1] bit set */
- goto illegal_return;
- }
- if (new_el == 0 && (spsr & PSTATE_SP)) {
- /* Return to EL0 with M[0] bit set */
- goto illegal_return;
+ if (spsr & CPSR_T) {
+ env->regs[15] = env->elr_el[cur_el] & ~0x1;
+ } else {
+ env->regs[15] = env->elr_el[cur_el] & ~0x3;
}
+ } else {
env->aarch64 = 1;
pstate_write(env, spsr);
if (!arm_singlestep_active(env)) {
@@ -859,6 +1142,25 @@ static bool check_breakpoints(ARMCPU *cpu)
return false;
}
+void HELPER(check_breakpoints)(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (check_breakpoints(cpu)) {
+ HELPER(exception_internal)(env, EXCP_DEBUG);
+ }
+}
+
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+ /* Called by core code when a CPU watchpoint fires; need to check if this
+ * is also an architectural watchpoint match.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return check_watchpoints(cpu);
+}
+
void arm_debug_excp_handler(CPUState *cs)
{
/* Called by core code when a watchpoint or breakpoint fires;
@@ -870,37 +1172,44 @@ void arm_debug_excp_handler(CPUState *cs)
if (wp_hit) {
if (wp_hit->flags & BP_CPU) {
+ bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+ bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
cs->watchpoint_hit = NULL;
- if (check_watchpoints(cpu)) {
- bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
- bool same_el = arm_debug_target_el(env) == arm_current_el(env);
-
- if (extended_addresses_enabled(env)) {
- env->exception.fsr = (1 << 9) | 0x22;
- } else {
- env->exception.fsr = 0x2;
- }
- env->exception.vaddress = wp_hit->hitaddr;
- raise_exception(env, EXCP_DATA_ABORT,
- syn_watchpoint(same_el, 0, wnr),
- arm_debug_target_el(env));
- } else {
- cpu_resume_from_signal(cs, NULL);
- }
- }
- } else {
- if (check_breakpoints(cpu)) {
- bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
if (extended_addresses_enabled(env)) {
env->exception.fsr = (1 << 9) | 0x22;
} else {
env->exception.fsr = 0x2;
}
- /* FAR is UNKNOWN, so doesn't need setting */
- raise_exception(env, EXCP_PREFETCH_ABORT,
- syn_breakpoint(same_el),
- arm_debug_target_el(env));
+ env->exception.vaddress = wp_hit->hitaddr;
+ raise_exception(env, EXCP_DATA_ABORT,
+ syn_watchpoint(same_el, 0, wnr),
+ arm_debug_target_el(env));
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+ bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
+ /* (1) GDB breakpoints should be handled first.
+ * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+ * since singlestep is also done by generating a debug internal
+ * exception.
+ */
+ if (cpu_breakpoint_test(cs, pc, BP_GDB)
+ || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
+ return;
+ }
+
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = (1 << 9) | 0x22;
+ } else {
+ env->exception.fsr = 0x2;
}
+ /* FAR is UNKNOWN, so doesn't need setting */
+ raise_exception(env, EXCP_PREFETCH_ABORT,
+ syn_breakpoint(same_el),
+ arm_debug_target_el(env));
}
}
diff --git a/qemu/target-arm/psci.c b/qemu/target-arm/psci.c
index 20e4cb6f9..c55487f87 100644
--- a/qemu/target-arm/psci.c
+++ b/qemu/target-arm/psci.c
@@ -15,6 +15,7 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include <cpu.h>
#include <cpu-qom.h>
#include <exec/helper-proto.h>
diff --git a/qemu/target-arm/translate-a64.c b/qemu/target-arm/translate-a64.c
index 689f2be89..b13cff756 100644
--- a/qemu/target-arm/translate-a64.c
+++ b/qemu/target-arm/translate-a64.c
@@ -16,11 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "tcg-op.h"
@@ -30,25 +26,20 @@
#include "internals.h"
#include "qemu/host-utils.h"
+#include "exec/semihost.h"
#include "exec/gen-icount.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
+#include "exec/log.h"
#include "trace-tcg.h"
static TCGv_i64 cpu_X[32];
static TCGv_i64 cpu_pc;
-static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
/* Load/store exclusive handling */
-static TCGv_i64 cpu_exclusive_addr;
-static TCGv_i64 cpu_exclusive_val;
static TCGv_i64 cpu_exclusive_high;
-#ifdef CONFIG_USER_ONLY
-static TCGv_i64 cpu_exclusive_test;
-static TCGv_i32 cpu_exclusive_info;
-#endif
static const char *regnames[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
@@ -95,32 +86,17 @@ void a64_translate_init(void)
{
int i;
- cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_pc = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, pc),
"pc");
for (i = 0; i < 32; i++) {
- cpu_X[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, xregs[i]),
regnames[i]);
}
- cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
- cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
- cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
- cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
-
- cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
- cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUARMState, exclusive_val), "exclusive_val");
- cpu_exclusive_high = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, exclusive_high), "exclusive_high");
-#ifdef CONFIG_USER_ONLY
- cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUARMState, exclusive_test), "exclusive_test");
- cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUARMState, exclusive_info), "exclusive_info");
-#endif
}
static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
@@ -147,6 +123,8 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
CPUARMState *env = &cpu->env;
uint32_t psr = pstate_read(env);
int i;
+ int el = arm_current_el(env);
+ const char *ns_status;
cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
env->pc, env->xregs[31]);
@@ -158,13 +136,22 @@ void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
cpu_fprintf(f, " ");
}
}
- cpu_fprintf(f, "PSTATE=%08x (flags %c%c%c%c)\n",
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+
+ cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
psr,
psr & PSTATE_N ? 'N' : '-',
psr & PSTATE_Z ? 'Z' : '-',
psr & PSTATE_C ? 'C' : '-',
- psr & PSTATE_V ? 'V' : '-');
- cpu_fprintf(f, "\n");
+ psr & PSTATE_V ? 'V' : '-',
+ ns_status,
+ el,
+ psr & PSTATE_SP ? 'h' : 't');
if (flags & CPU_DUMP_FPU) {
int numvfpregs = 32;
@@ -188,6 +175,31 @@ void gen_a64_set_pc_im(uint64_t val)
tcg_gen_movi_i64(cpu_pc, val);
}
+typedef struct DisasCompare64 {
+ TCGCond cond;
+ TCGv_i64 value;
+} DisasCompare64;
+
+static void a64_test_cc(DisasCompare64 *c64, int cc)
+{
+ DisasCompare c32;
+
+ arm_test_cc(&c32, cc);
+
+ /* Sign-extend the 32-bit value so that the GE/LT comparisons work
+ * properly. The NE/EQ comparisons are also fine with this choice. */
+ c64->cond = c32.cond;
+ c64->value = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(c64->value, c32.value);
+
+ arm_free_cc(&c32);
+}
+
+static void a64_free_cc(DisasCompare64 *c64)
+{
+ tcg_temp_free_i64(c64->value);
+}
+
static void gen_exception_internal(int excp)
{
TCGv_i32 tcg_excp = tcg_const_i32(excp);
@@ -525,13 +537,8 @@ static TCGv_ptr get_fpstatus_ptr(void)
*/
static inline void gen_set_NZ64(TCGv_i64 result)
{
- TCGv_i64 flag = tcg_temp_new_i64();
-
- tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
- tcg_gen_trunc_i64_i32(cpu_ZF, flag);
- tcg_gen_shri_i64(flag, result, 32);
- tcg_gen_trunc_i64_i32(cpu_NF, flag);
- tcg_temp_free_i64(flag);
+ tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
}
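The rewritten gen_set_NZ64() leans on a simple identity: a 64-bit value is zero exactly when the OR of its two halves is zero, and the sign is bit 31 of the high half. In plain C (illustration only, not TCG):

    uint64_t result = 0xffffffff00000000ULL;  /* negative and non-zero */
    uint32_t nf = result >> 32;               /* bit 31 set -> N will be set */
    uint32_t zf = (uint32_t)result | nf;      /* non-zero -> Z stays clear */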
/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
@@ -540,8 +547,8 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result)
if (sf) {
gen_set_NZ64(result);
} else {
- tcg_gen_trunc_i64_i32(cpu_ZF, result);
- tcg_gen_trunc_i64_i32(cpu_NF, result);
+ tcg_gen_extrl_i64_i32(cpu_ZF, result);
+ tcg_gen_mov_i32(cpu_NF, cpu_ZF);
}
tcg_gen_movi_i32(cpu_CF, 0);
tcg_gen_movi_i32(cpu_VF, 0);
@@ -559,7 +566,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_movi_i64(tmp, 0);
tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
- tcg_gen_trunc_i64_i32(cpu_CF, flag);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
gen_set_NZ64(result);
@@ -567,8 +574,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_xor_i64(tmp, t0, t1);
tcg_gen_andc_i64(flag, flag, tmp);
tcg_temp_free_i64(tmp);
- tcg_gen_shri_i64(flag, flag, 32);
- tcg_gen_trunc_i64_i32(cpu_VF, flag);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
tcg_gen_mov_i64(dest, result);
tcg_temp_free_i64(result);
@@ -580,8 +586,8 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, 0);
- tcg_gen_trunc_i64_i32(t0_32, t0);
- tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
@@ -609,15 +615,14 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
gen_set_NZ64(result);
tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
- tcg_gen_trunc_i64_i32(cpu_CF, flag);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
tcg_gen_xor_i64(flag, result, t0);
tmp = tcg_temp_new_i64();
tcg_gen_xor_i64(tmp, t0, t1);
tcg_gen_and_i64(flag, flag, tmp);
tcg_temp_free_i64(tmp);
- tcg_gen_shri_i64(flag, flag, 32);
- tcg_gen_trunc_i64_i32(cpu_VF, flag);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
tcg_gen_mov_i64(dest, result);
tcg_temp_free_i64(flag);
tcg_temp_free_i64(result);
@@ -627,8 +632,8 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
TCGv_i32 t1_32 = tcg_temp_new_i32();
TCGv_i32 tmp;
- tcg_gen_trunc_i64_i32(t0_32, t0);
- tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
tcg_gen_mov_i32(cpu_ZF, cpu_NF);
tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
@@ -670,14 +675,13 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
tcg_gen_extu_i32_i64(cf_64, cpu_CF);
tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
- tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
+ tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
gen_set_NZ64(result);
tcg_gen_xor_i64(vf_64, result, t0);
tcg_gen_xor_i64(tmp, t0, t1);
tcg_gen_andc_i64(vf_64, vf_64, tmp);
- tcg_gen_shri_i64(vf_64, vf_64, 32);
- tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
+ tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
tcg_gen_mov_i64(dest, result);
@@ -691,8 +695,8 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
t1_32 = tcg_temp_new_i32();
tmp = tcg_const_i32(0);
- tcg_gen_trunc_i64_i32(t0_32, t0);
- tcg_gen_trunc_i64_i32(t1_32, t1);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
@@ -719,7 +723,7 @@ static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
TCGv_i64 tcg_addr, int size, int memidx)
{
g_assert(size <= 3);
- tcg_gen_qemu_st_i64(source, tcg_addr, memidx, MO_TE + size);
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
}
static void do_gpr_st(DisasContext *s, TCGv_i64 source,
@@ -734,7 +738,7 @@ static void do_gpr_st(DisasContext *s, TCGv_i64 source,
static void do_gpr_ld_memidx(DisasContext *s, TCGv_i64 dest, TCGv_i64 tcg_addr,
int size, bool is_signed, bool extend, int memidx)
{
- TCGMemOp memop = MO_TE + size;
+ TCGMemOp memop = s->be_data + size;
g_assert(size <= 3);
@@ -766,13 +770,18 @@ static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
TCGv_i64 tmp = tcg_temp_new_i64();
tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
if (size < 4) {
- tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TE + size);
+ tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
+ s->be_data + size);
} else {
+ bool be = s->be_data == MO_BE;
TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
- tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s), MO_TEQ);
- tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
+
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
- tcg_gen_qemu_st_i64(tmp, tcg_hiaddr, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
+ s->be_data | MO_Q);
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
+ tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
+ s->be_data | MO_Q);
tcg_temp_free_i64(tcg_hiaddr);
}
@@ -789,17 +798,21 @@ static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
TCGv_i64 tmphi;
if (size < 4) {
- TCGMemOp memop = MO_TE + size;
+ TCGMemOp memop = s->be_data + size;
tmphi = tcg_const_i64(0);
tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
} else {
+ bool be = s->be_data == MO_BE;
TCGv_i64 tcg_hiaddr;
+
tmphi = tcg_temp_new_i64();
tcg_hiaddr = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), MO_TEQ);
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
- tcg_gen_qemu_ld_i64(tmphi, tcg_hiaddr, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
+ s->be_data | MO_Q);
+ tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
+ s->be_data | MO_Q);
tcg_temp_free_i64(tcg_hiaddr);
}
@@ -938,7 +951,7 @@ static void clear_vec_high(DisasContext *s, int rd)
static void do_vec_st(DisasContext *s, int srcidx, int element,
TCGv_i64 tcg_addr, int size)
{
- TCGMemOp memop = MO_TE + size;
+ TCGMemOp memop = s->be_data + size;
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
read_vec_element(s, tcg_tmp, srcidx, element, size);
@@ -951,7 +964,7 @@ static void do_vec_st(DisasContext *s, int srcidx, int element,
static void do_vec_ld(DisasContext *s, int destidx, int element,
TCGv_i64 tcg_addr, int size)
{
- TCGMemOp memop = MO_TE + size;
+ TCGMemOp memop = s->be_data + size;
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
@@ -1234,9 +1247,15 @@ static void handle_sync(DisasContext *s, uint32_t insn,
return;
case 4: /* DSB */
case 5: /* DMB */
- case 6: /* ISB */
/* We don't emulate caches so barriers are no-ops */
return;
+ case 6: /* ISB */
+ /* We need to break the TB after this insn to execute
+ * self-modifying code correctly and also to take
+ * any pending interrupts immediately.
+ */
+ s->is_jmp = DISAS_UPDATE;
+ return;
default:
unallocated_encoding(s);
return;
@@ -1301,7 +1320,7 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
TCGv_i32 nzcv = tcg_temp_new_i32();
/* take NZCV from R[t] */
- tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
+ tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
/* bit 31, N */
tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
@@ -1357,16 +1376,18 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
* runtime; this may result in an exception.
*/
TCGv_ptr tmpptr;
- TCGv_i32 tcg_syn;
+ TCGv_i32 tcg_syn, tcg_isread;
uint32_t syndrome;
gen_a64_set_pc_im(s->pc - 4);
tmpptr = tcg_const_ptr(ri);
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
tcg_syn = tcg_const_i32(syndrome);
- gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
+ tcg_isread = tcg_const_i32(isread);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
tcg_temp_free_ptr(tmpptr);
tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_isread);
}
/* Handle special cases first */
@@ -1553,8 +1574,27 @@ static void disas_exc(DisasContext *s, uint32_t insn)
unallocated_encoding(s);
break;
}
- /* HLT */
- unsupported_encoding(s, insn);
+ /* HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+ * Since QEMU doesn't implement external debug, we give it the
+ * behaviour required when halting debug is disabled: it will UNDEF.
+ * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
+ */
+ if (semihosting_enabled() && imm16 == 0xf000) {
+#ifndef CONFIG_USER_ONLY
+ /* In system mode, don't allow userspace access to semihosting,
+ * to provide some semblance of security (and for consistency
+ * with our 32-bit semihosting).
+ */
+ if (s->current_el == 0) {
+ unsupported_encoding(s, insn);
+ break;
+ }
+#endif
+ gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ } else {
+ unsupported_encoding(s, insn);
+ }
break;
case 5:
if (op2_ll < 1 || op2_ll > 3) {
@@ -1671,7 +1711,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i64 addr, int size, bool is_pair)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- TCGMemOp memop = MO_TE + size;
+ TCGMemOp memop = s->be_data + size;
g_assert(size <= 3);
tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
@@ -1733,7 +1773,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
tmp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), MO_TE + size);
+ tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), s->be_data + size);
tcg_gen_brcond_i64(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
tcg_temp_free_i64(tmp);
@@ -1742,7 +1782,8 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i64 tmphi = tcg_temp_new_i64();
tcg_gen_addi_i64(addrhi, addr, 1 << size);
- tcg_gen_qemu_ld_i64(tmphi, addrhi, get_mem_index(s), MO_TE + size);
+ tcg_gen_qemu_ld_i64(tmphi, addrhi, get_mem_index(s),
+ s->be_data + size);
tcg_gen_brcond_i64(TCG_COND_NE, tmphi, cpu_exclusive_high, fail_label);
tcg_temp_free_i64(tmphi);
@@ -1750,13 +1791,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
}
/* We seem to still have the exclusive monitor, so do the store */
- tcg_gen_qemu_st_i64(cpu_reg(s, rt), addr, get_mem_index(s), MO_TE + size);
+ tcg_gen_qemu_st_i64(cpu_reg(s, rt), addr, get_mem_index(s),
+ s->be_data + size);
if (is_pair) {
TCGv_i64 addrhi = tcg_temp_new_i64();
tcg_gen_addi_i64(addrhi, addr, 1 << size);
tcg_gen_qemu_st_i64(cpu_reg(s, rt2), addrhi,
- get_mem_index(s), MO_TE + size);
+ get_mem_index(s), s->be_data + size);
tcg_temp_free_i64(addrhi);
}
@@ -1784,9 +1826,6 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
* o2: 0 -> exclusive, 1 -> not
* o1: 0 -> single register, 1 -> register pair
* o0: 1 -> load-acquire/store-release, 0 -> not
- *
- * o0 == 0 AND o2 == 1 is un-allocated
- * o1 == 1 is un-allocated except for 32 and 64 bit sizes
*/
static void disas_ldst_excl(DisasContext *s, uint32_t insn)
{
@@ -1801,7 +1840,8 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
int size = extract32(insn, 30, 2);
TCGv_i64 tcg_addr;
- if ((!is_excl && !is_lasr) ||
+ if ((!is_excl && !is_pair && !is_lasr) ||
+ (!is_excl && is_pair) ||
(is_pair && size < 2)) {
unallocated_encoding(s);
return;
@@ -1830,15 +1870,6 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
} else {
do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false);
}
- if (is_pair) {
- TCGv_i64 tcg_rt2 = cpu_reg(s, rt);
- tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
- if (is_store) {
- do_gpr_st(s, tcg_rt2, tcg_addr, size);
- } else {
- do_gpr_ld(s, tcg_rt2, tcg_addr, size, false, false);
- }
- }
}
}
@@ -2582,7 +2613,7 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
TCGv_i64 tcg_tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
- get_mem_index(s), MO_TE + scale);
+ get_mem_index(s), s->be_data + scale);
switch (scale) {
case 0:
mulconst = 0x0101010101010101ULL;
@@ -2612,9 +2643,9 @@ static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
} else {
/* Load/store one element per register */
if (is_load) {
- do_vec_ld(s, rt, index, tcg_addr, MO_TE + scale);
+ do_vec_ld(s, rt, index, tcg_addr, s->be_data + scale);
} else {
- do_vec_st(s, rt, index, tcg_addr, MO_TE + scale);
+ do_vec_st(s, rt, index, tcg_addr, s->be_data + scale);
}
}
tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
@@ -2992,9 +3023,51 @@ static void disas_bitfield(DisasContext *s, uint32_t insn)
}
tcg_rd = cpu_reg(s, rd);
- tcg_tmp = read_cpu_reg(s, rn, sf);
- /* OPTME: probably worth recognizing common cases of ext{8,16,32}{u,s} */
+ /* Suppress the zero-extend for !sf. Since RI and SI are constrained
+ to be smaller than bitsize, we'll never reference data outside the
+ low 32-bits anyway. */
+ tcg_tmp = read_cpu_reg(s, rn, 1);
+
+ /* Recognize the common aliases. */
+ if (opc == 0) { /* SBFM */
+ if (ri == 0) {
+ if (si == 7) { /* SXTB */
+ tcg_gen_ext8s_i64(tcg_rd, tcg_tmp);
+ goto done;
+ } else if (si == 15) { /* SXTH */
+ tcg_gen_ext16s_i64(tcg_rd, tcg_tmp);
+ goto done;
+ } else if (si == 31) { /* SXTW */
+ tcg_gen_ext32s_i64(tcg_rd, tcg_tmp);
+ goto done;
+ }
+ }
+ if (si == 63 || (si == 31 && ri <= si)) { /* ASR */
+ if (si == 31) {
+ tcg_gen_ext32s_i64(tcg_tmp, tcg_tmp);
+ }
+ tcg_gen_sari_i64(tcg_rd, tcg_tmp, ri);
+ goto done;
+ }
+ } else if (opc == 2) { /* UBFM */
+ if (ri == 0) { /* UXTB, UXTH, plus non-canonical AND */
+ tcg_gen_andi_i64(tcg_rd, tcg_tmp, bitmask64(si + 1));
+ return;
+ }
+ if (si == 63 || (si == 31 && ri <= si)) { /* LSR */
+ if (si == 31) {
+ tcg_gen_ext32u_i64(tcg_tmp, tcg_tmp);
+ }
+ tcg_gen_shri_i64(tcg_rd, tcg_tmp, ri);
+ return;
+ }
+ if (si + 1 == ri && si != bitsize - 1) { /* LSL */
+ int shift = bitsize - 1 - si;
+ tcg_gen_shli_i64(tcg_rd, tcg_tmp, shift);
+ goto done;
+ }
+ }
if (opc != 1) { /* SBFM or UBFM */
tcg_gen_movi_i64(tcg_rd, 0);
@@ -3019,6 +3092,7 @@ static void disas_bitfield(DisasContext *s, uint32_t insn)
tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
}
+ done:
if (!sf) { /* zero extend final result */
tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
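As a spot-check of the LSL alias arithmetic above: "LSL Xd, Xn, #8" assembles as UBFM with immr=56 and imms=55, so ri=56 and si=55 satisfy si+1 == ri, and the recovered shift is 63 - 55 = 8 (illustration only):

    /* Illustrative check of the UBFM -> LSL alias condition (sf=1). */
    int bitsize = 64, ri = 56, si = 55;  /* LSL #8 == UBFM #56, #55 */
    assert(si + 1 == ri && si != bitsize - 1);
    assert(bitsize - 1 - si == 8);       /* recovered shift amount */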
@@ -3051,17 +3125,7 @@ static void disas_extract(DisasContext *s, uint32_t insn)
tcg_rd = cpu_reg(s, rd);
- if (imm) {
- /* OPTME: we can special case rm==rn as a rotate */
- tcg_rm = read_cpu_reg(s, rm, sf);
- tcg_rn = read_cpu_reg(s, rn, sf);
- tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
- tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
- tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
- }
- } else {
+ if (unlikely(imm == 0)) {
/* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
* so an extract from bit 0 is a special case.
*/
@@ -3070,8 +3134,27 @@ static void disas_extract(DisasContext *s, uint32_t insn)
} else {
tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
}
+ } else if (rm == rn) { /* ROR */
+ tcg_rm = cpu_reg(s, rm);
+ if (sf) {
+ tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tcg_rm);
+ tcg_gen_rotri_i32(tmp, tmp, imm);
+ tcg_gen_extu_i32_i64(tcg_rd, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ } else {
+ tcg_rm = read_cpu_reg(s, rm, sf);
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
+ tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
+ tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
}
-
}
}
@@ -3131,8 +3214,8 @@ static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(t0, src);
- tcg_gen_trunc_i64_i32(t1, shift_amount);
+ tcg_gen_extrl_i64_i32(t0, src);
+ tcg_gen_extrl_i64_i32(t1, shift_amount);
tcg_gen_rotr_i32(t0, t0, t1);
tcg_gen_extu_i32_i64(dst, t0);
tcg_temp_free_i32(t0);
@@ -3547,8 +3630,9 @@ static void disas_adc_sbc(DisasContext *s, uint32_t insn)
static void disas_cc(DisasContext *s, uint32_t insn)
{
unsigned int sf, op, y, cond, rn, nzcv, is_imm;
- TCGLabel *label_continue = NULL;
+ TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
+ DisasCompare c;
if (!extract32(insn, 29, 1)) {
unallocated_encoding(s);
@@ -3566,19 +3650,13 @@ static void disas_cc(DisasContext *s, uint32_t insn)
rn = extract32(insn, 5, 5);
nzcv = extract32(insn, 0, 4);
- if (cond < 0x0e) { /* not always */
- TCGLabel *label_match = gen_new_label();
- label_continue = gen_new_label();
- arm_gen_test_cc(cond, label_match);
- /* nomatch: */
- tcg_tmp = tcg_temp_new_i64();
- tcg_gen_movi_i64(tcg_tmp, nzcv << 28);
- gen_set_nzcv(tcg_tmp);
- tcg_temp_free_i64(tcg_tmp);
- tcg_gen_br(label_continue);
- gen_set_label(label_match);
- }
- /* match, or condition is always */
+ /* Set T0 = !COND. */
+ tcg_t0 = tcg_temp_new_i32();
+ arm_test_cc(&c, cond);
+ tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
+ arm_free_cc(&c);
+
+ /* Load the arguments for the new comparison. */
if (is_imm) {
tcg_y = new_tmp_a64(s);
tcg_gen_movi_i64(tcg_y, y);
@@ -3587,6 +3665,7 @@ static void disas_cc(DisasContext *s, uint32_t insn)
}
tcg_rn = cpu_reg(s, rn);
+ /* Set the flags for the new comparison. */
tcg_tmp = tcg_temp_new_i64();
if (op) {
gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
@@ -3595,9 +3674,55 @@ static void disas_cc(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i64(tcg_tmp);
- if (cond < 0x0e) { /* continue */
- gen_set_label(label_continue);
+ /* If COND was false, force the flags to #nzcv. Compute two masks
+ * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
+ * For tcg hosts that support ANDC, we can make do with just T1.
+ * In either case, allow the tcg optimizer to delete any unused mask.
+ */
+ tcg_t1 = tcg_temp_new_i32();
+ tcg_t2 = tcg_temp_new_i32();
+ tcg_gen_neg_i32(tcg_t1, tcg_t0);
+ tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
+
+ if (nzcv & 8) { /* N */
+ tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
+ }
}
+ if (nzcv & 4) { /* Z */
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
+ }
+ } else {
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
+ }
+ if (nzcv & 2) { /* C */
+ tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
+ }
+ }
+ if (nzcv & 1) { /* V */
+ tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
+ }
+ }
+ tcg_temp_free_i32(tcg_t0);
+ tcg_temp_free_i32(tcg_t1);
+ tcg_temp_free_i32(tcg_t2);
}
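The two masks are cheap to picture in plain C: with t0 = !COND, negation yields the all-ones OR-mask exactly when COND failed, and t0 - 1 yields the all-ones AND-mask exactly when COND held (illustration only, not TCG):

    uint32_t t0 = 1;       /* COND was false */
    uint32_t t1 = -t0;     /* 0xffffffff: OR with this forces a flag set */
    uint32_t t2 = t0 - 1;  /* 0x00000000: AND with this forces a flag clear */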
/* C3.5.6 Conditional select
@@ -3609,7 +3734,8 @@ static void disas_cc(DisasContext *s, uint32_t insn)
static void disas_cond_select(DisasContext *s, uint32_t insn)
{
unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
- TCGv_i64 tcg_rd, tcg_src;
+ TCGv_i64 tcg_rd, zero;
+ DisasCompare64 c;
if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
/* S == 1 or op2<1> == 1 */
@@ -3624,48 +3750,35 @@ static void disas_cond_select(DisasContext *s, uint32_t insn)
rn = extract32(insn, 5, 5);
rd = extract32(insn, 0, 5);
- if (rd == 31) {
- /* silly no-op write; until we use movcond we must special-case
- * this to avoid a dead temporary across basic blocks.
- */
- return;
- }
-
tcg_rd = cpu_reg(s, rd);
- if (cond >= 0x0e) { /* condition "always" */
- tcg_src = read_cpu_reg(s, rn, sf);
- tcg_gen_mov_i64(tcg_rd, tcg_src);
- } else {
- /* OPTME: we could use movcond here, at the cost of duplicating
- * a lot of the arm_gen_test_cc() logic.
- */
- TCGLabel *label_match = gen_new_label();
- TCGLabel *label_continue = gen_new_label();
-
- arm_gen_test_cc(cond, label_match);
- /* nomatch: */
- tcg_src = cpu_reg(s, rm);
+ a64_test_cc(&c, cond);
+ zero = tcg_const_i64(0);
+ if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
+ /* CSET & CSETM. */
+ tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
+ if (else_inv) {
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ TCGv_i64 t_true = cpu_reg(s, rn);
+ TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
if (else_inv && else_inc) {
- tcg_gen_neg_i64(tcg_rd, tcg_src);
+ tcg_gen_neg_i64(t_false, t_false);
} else if (else_inv) {
- tcg_gen_not_i64(tcg_rd, tcg_src);
+ tcg_gen_not_i64(t_false, t_false);
} else if (else_inc) {
- tcg_gen_addi_i64(tcg_rd, tcg_src, 1);
- } else {
- tcg_gen_mov_i64(tcg_rd, tcg_src);
- }
- if (!sf) {
- tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ tcg_gen_addi_i64(t_false, t_false, 1);
}
- tcg_gen_br(label_continue);
- /* match: */
- gen_set_label(label_match);
- tcg_src = read_cpu_reg(s, rn, sf);
- tcg_gen_mov_i64(tcg_rd, tcg_src);
- /* continue: */
- gen_set_label(label_continue);
+ tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
+ }
+
+ tcg_temp_free_i64(zero);
+ a64_free_cc(&c);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
}
}
@@ -3680,7 +3793,7 @@ static void handle_clz(DisasContext *s, unsigned int sf,
gen_helper_clz64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_clz(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
@@ -3698,7 +3811,7 @@ static void handle_cls(DisasContext *s, unsigned int sf,
gen_helper_cls64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_cls32(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
@@ -3716,7 +3829,7 @@ static void handle_rbit(DisasContext *s, unsigned int sf,
gen_helper_rbit64(tcg_rd, tcg_rn);
} else {
TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
gen_helper_rbit(tcg_tmp32, tcg_tmp32);
tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
tcg_temp_free_i32(tcg_tmp32);
@@ -4152,20 +4265,6 @@ static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
}
}
-/* copy src FP register to dst FP register; type specifies single or double */
-static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src)
-{
- if (type) {
- TCGv_i64 v = read_fp_dreg(s, src);
- write_fp_dreg(s, dst, v);
- tcg_temp_free_i64(v);
- } else {
- TCGv_i32 v = read_fp_sreg(s, src);
- write_fp_sreg(s, dst, v);
- tcg_temp_free_i32(v);
- }
-}
-
/* C3.6.24 Floating point conditional select
* 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
* +---+---+---+-----------+------+---+------+------+-----+------+------+
@@ -4175,7 +4274,8 @@ static void gen_mov_fp2fp(DisasContext *s, int type, int dst, int src)
static void disas_fp_csel(DisasContext *s, uint32_t insn)
{
unsigned int mos, type, rm, cond, rn, rd;
- TCGLabel *label_continue = NULL;
+ TCGv_i64 t_true, t_false, t_zero;
+ DisasCompare64 c;
mos = extract32(insn, 29, 3);
type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
@@ -4193,21 +4293,23 @@ static void disas_fp_csel(DisasContext *s, uint32_t insn)
return;
}
- if (cond < 0x0e) { /* not always */
- TCGLabel *label_match = gen_new_label();
- label_continue = gen_new_label();
- arm_gen_test_cc(cond, label_match);
- /* nomatch: */
- gen_mov_fp2fp(s, type, rd, rm);
- tcg_gen_br(label_continue);
- gen_set_label(label_match);
- }
+ /* Zero extend sreg inputs to 64 bits now. */
+ t_true = tcg_temp_new_i64();
+ t_false = tcg_temp_new_i64();
+ read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
+ read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);
- gen_mov_fp2fp(s, type, rd, rn);
+ a64_test_cc(&c, cond);
+ t_zero = tcg_const_i64(0);
+ tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
+ tcg_temp_free_i64(t_zero);
+ tcg_temp_free_i64(t_false);
+ a64_free_cc(&c);
- if (cond < 0x0e) { /* continue */
- gen_set_label(label_continue);
- }
+ /* Note that sregs write back zeros to the high bits,
+ and we've already done the zero-extension. */
+ write_fp_dreg(s, rd, t_true);
+ tcg_temp_free_i64(t_true);
}
/* C3.6.25 Floating-point data-processing (1 source) - single precision */
@@ -5475,16 +5577,16 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
assert(elements == 4);
read_vec_element(s, tcg_elt, rn, 0, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
read_vec_element(s, tcg_elt, rn, 1, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
read_vec_element(s, tcg_elt, rn, 2, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
read_vec_element(s, tcg_elt, rn, 3, MO_32);
- tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
+ tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
@@ -7647,7 +7749,7 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
static NeonGenNarrowFn * const xtnfns[3] = {
gen_helper_neon_narrow_u8,
gen_helper_neon_narrow_u16,
- tcg_gen_trunc_i64_i32,
+ tcg_gen_extrl_i64_i32,
};
static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
gen_helper_neon_unarrow_sat8,
@@ -7681,10 +7783,8 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
} else {
TCGv_i32 tcg_lo = tcg_temp_new_i32();
TCGv_i32 tcg_hi = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tcg_lo, tcg_op);
+ tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
- tcg_gen_shri_i64(tcg_op, tcg_op, 32);
- tcg_gen_trunc_i64_i32(tcg_hi, tcg_op);
gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
tcg_temp_free_i32(tcg_lo);
@@ -8590,16 +8690,10 @@ static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
}
}
-static void do_narrow_high_u32(TCGv_i32 res, TCGv_i64 in)
-{
- tcg_gen_shri_i64(in, in, 32);
- tcg_gen_trunc_i64_i32(res, in);
-}
-
static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
{
tcg_gen_addi_i64(in, in, 1U << 31);
- do_narrow_high_u32(res, in);
+ tcg_gen_extrh_i64_i32(res, in);
}
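
/* tcg_gen_extrh_i64_i32 extracts bits [63:32] directly, so the
 * unrounded narrow-high helper goes away and the rounded variant is
 * just "add 2^31, then take the high half".  Host-side model
 * (illustrative):
 */
#include <stdint.h>

static uint32_t narrow_round_high_u32_model(uint64_t in)
{
    return (uint32_t)((in + (1ULL << 31)) >> 32);   /* round, then extrh */
}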
static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
@@ -8618,7 +8712,7 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
gen_helper_neon_narrow_round_high_u8 },
{ gen_helper_neon_narrow_high_u16,
gen_helper_neon_narrow_round_high_u16 },
- { do_narrow_high_u32, do_narrow_round_high_u32 },
+ { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
};
NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
@@ -10883,7 +10977,7 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
{
uint32_t insn;
- insn = arm_ldl_code(env, s->pc, s->bswap_code);
+ insn = arm_ldl_code(env, s->pc, s->sctlr_b);
s->insn = insn;
s->pc += 4;
@@ -10922,15 +11016,11 @@ static void disas_a64_insn(CPUARMState *env, DisasContext *s)
free_tmp_a64(s);
}
-void gen_intermediate_code_internal_a64(ARMCPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
{
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
- CPUBreakpoint *bp;
- int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
int num_insns;
@@ -10946,9 +11036,14 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
dc->condjmp = 0;
dc->aarch64 = 1;
- dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
dc->thumb = 0;
- dc->bswap_code = 0;
+ dc->sctlr_b = 0;
+ dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
dc->condexec_mask = 0;
dc->condexec_cond = 0;
dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
@@ -10985,51 +11080,51 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
init_tmp_a64_array(dc);
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
}
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
gen_tb_start(tb);
tcg_clear_temp_count();
do {
+ tcg_gen_insn_start(dc->pc, 0);
+ num_insns++;
+
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
- /* Advance PC so that clearing the breakpoint will
- invalidate this TB. */
- dc->pc += 2;
- goto done_generating;
- }
- }
- }
-
- if (search_pc) {
- j = tcg_op_buf_count();
- if (lj < j) {
- lj++;
- while (lj < j) {
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
+ if (bp->flags & BP_CPU) {
+ gen_a64_set_pc_im(dc->pc);
+ gen_helper_check_breakpoints(cpu_env);
+ /* End the TB early; it likely won't be executed */
+ dc->is_jmp = DISAS_UPDATE;
+ } else {
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be
+ included in [tb->pc, tb->pc + tb->size) in order
+ for it to be properly cleared -- thus we
+ increment the PC here so that the logic setting
+ tb->size below does the right thing. */
+ dc->pc += 4;
+ goto done_generating;
+ }
+ break;
}
}
- tcg_ctx.gen_opc_pc[lj] = dc->pc;
- tcg_ctx.gen_opc_instr_start[lj] = 1;
- tcg_ctx.gen_opc_icount[lj] = num_insns;
}
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
}
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(dc->pc);
- }
-
if (dc->ss_active && !dc->pstate_ss) {
/* Singlestep state is Active-pending.
* If we're in this state at the start of a TB then either
@@ -11041,7 +11136,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
* "did not step an insn" case, and so the syndrome ISV and EX
* bits should be zero.
*/
- assert(num_insns == 0);
+ assert(num_insns == 1);
gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
default_exception_el(dc));
dc->is_jmp = DISAS_EXC;
@@ -11060,7 +11155,6 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place.
*/
- num_insns++;
} while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
@@ -11131,22 +11225,15 @@ done_generating:
gen_tb_end(tb, num_insns);
#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
+ qemu_log_in_addr_range(pc_start)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start,
- 4 | (dc->bswap_code << 1));
+ 4 | (bswap_code(dc->sctlr_b) ? 2 : 0));
qemu_log("\n");
}
#endif
- if (search_pc) {
- j = tcg_op_buf_count();
- lj++;
- while (lj <= j) {
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- } else {
- tb->size = dc->pc - pc_start;
- tb->icount = num_insns;
- }
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
}
diff --git a/qemu/target-arm/translate.c b/qemu/target-arm/translate.c
index 69ac18c10..940ec8d98 100644
--- a/qemu/target-arm/translate.c
+++ b/qemu/target-arm/translate.c
@@ -18,11 +18,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
+#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
@@ -36,6 +32,7 @@
#include "exec/helper-gen.h"
#include "trace-tcg.h"
+#include "exec/log.h"
#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
@@ -52,7 +49,6 @@
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
#include "translate.h"
-static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
@@ -60,16 +56,16 @@ static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
#define IS_USER(s) (s->user)
#endif
-TCGv_ptr cpu_env;
+TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
-static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
-static TCGv_i64 cpu_exclusive_addr;
-static TCGv_i64 cpu_exclusive_val;
+TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
+TCGv_i64 cpu_exclusive_addr;
+TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
-static TCGv_i64 cpu_exclusive_test;
-static TCGv_i32 cpu_exclusive_info;
+TCGv_i64 cpu_exclusive_test;
+TCGv_i32 cpu_exclusive_info;
#endif
/* FIXME: These should be removed. */
@@ -90,23 +86,23 @@ void arm_translate_init(void)
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
for (i = 0; i < 16; i++) {
- cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUARMState, regs[i]),
regnames[i]);
}
- cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
- cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
- cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
- cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
+ cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
+ cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
+ cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
+ cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
- cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
- cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
#ifdef CONFIG_USER_ONLY
- cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_exclusive_test = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUARMState, exclusive_test), "exclusive_test");
- cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_info = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
@@ -738,81 +734,113 @@ static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
#undef PAS_OP
/*
- * generate a conditional branch based on ARM condition code cc.
+ * Generate a comparison (a DisasCompare) for ARM condition code cc.
 * This is common between ARM and AArch64 targets.
*/
-void arm_gen_test_cc(int cc, TCGLabel *label)
+void arm_test_cc(DisasCompare *cmp, int cc)
{
- TCGv_i32 tmp;
- TCGLabel *inv;
+ TCGv_i32 value;
+ TCGCond cond;
+ bool global = true;
switch (cc) {
case 0: /* eq: Z */
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
- break;
case 1: /* ne: !Z */
- tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
+ cond = TCG_COND_EQ;
+ value = cpu_ZF;
break;
+
case 2: /* cs: C */
- tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
- break;
case 3: /* cc: !C */
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
+ cond = TCG_COND_NE;
+ value = cpu_CF;
break;
+
case 4: /* mi: N */
- tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
- break;
case 5: /* pl: !N */
- tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
+ cond = TCG_COND_LT;
+ value = cpu_NF;
break;
+
case 6: /* vs: V */
- tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
- break;
case 7: /* vc: !V */
- tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
+ cond = TCG_COND_LT;
+ value = cpu_VF;
break;
+
case 8: /* hi: C && !Z */
- inv = gen_new_label();
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
- tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
- gen_set_label(inv);
- break;
- case 9: /* ls: !C || Z */
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
+ case 9: /* ls: !C || Z -> !(C && !Z) */
+ cond = TCG_COND_NE;
+ value = tcg_temp_new_i32();
+ global = false;
+ /* CF is 1 for C, so -CF is an all-bits-set mask for C;
+ ZF is non-zero for !Z; so AND the two subexpressions. */
+ tcg_gen_neg_i32(value, cpu_CF);
+ tcg_gen_and_i32(value, value, cpu_ZF);
break;
+
case 10: /* ge: N == V -> N ^ V == 0 */
- tmp = tcg_temp_new_i32();
- tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
- tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
- tcg_temp_free_i32(tmp);
- break;
case 11: /* lt: N != V -> N ^ V != 0 */
- tmp = tcg_temp_new_i32();
- tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
- tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
- tcg_temp_free_i32(tmp);
+ /* Since we're only interested in the sign bit, == 0 is >= 0. */
+ cond = TCG_COND_GE;
+ value = tcg_temp_new_i32();
+ global = false;
+ tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
break;
+
case 12: /* gt: !Z && N == V */
- inv = gen_new_label();
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
- tmp = tcg_temp_new_i32();
- tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
- tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
- tcg_temp_free_i32(tmp);
- gen_set_label(inv);
- break;
case 13: /* le: Z || N != V */
- tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
- tmp = tcg_temp_new_i32();
- tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
- tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
- tcg_temp_free_i32(tmp);
+ cond = TCG_COND_NE;
+ value = tcg_temp_new_i32();
+ global = false;
+ /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
+ * the sign bit of NF ^ VF, then AND its complement with ZF
+ * (the andc below) to yield the result. */
+ tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
+ tcg_gen_sari_i32(value, value, 31);
+ tcg_gen_andc_i32(value, cpu_ZF, value);
break;
+
+ case 14: /* always */
+ case 15: /* always */
+ /* Use the ALWAYS condition, which will fold early.
+ * It doesn't matter what we use for the value. */
+ cond = TCG_COND_ALWAYS;
+ value = cpu_ZF;
+ goto no_invert;
+
default:
fprintf(stderr, "Bad condition code 0x%x\n", cc);
abort();
}
+
+ if (cc & 1) {
+ cond = tcg_invert_cond(cond);
+ }
+
+ no_invert:
+ cmp->cond = cond;
+ cmp->value = value;
+ cmp->value_global = global;
+}
+
+void arm_free_cc(DisasCompare *cmp)
+{
+ if (!cmp->value_global) {
+ tcg_temp_free_i32(cmp->value);
+ }
+}
+
+void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
+{
+ tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
+}
+
+void arm_gen_test_cc(int cc, TCGLabel *label)
+{
+ DisasCompare cmp;
+ arm_test_cc(&cmp, cc);
+ arm_jump_cc(&cmp, label);
+ arm_free_cc(&cmp);
}
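
/* The table above replaces per-condition branches with a (cond, value)
 * pair that callers can fold into brcond, movcond or setcond.  The two
 * composite cases are the interesting ones: hi/ls becomes -CF & ZF and
 * gt/le becomes ZF & ~sign(NF ^ VF).  A host-side model under QEMU's
 * flag representation (ZF nonzero means Z clear, NF/VF carry the sign
 * bit, CF is 0 or 1); the function names are illustrative:
 */
#include <stdint.h>
#include <stdbool.h>

static bool cc_hi(uint32_t cf, uint32_t zf)
{
    /* hi: C && !Z.  -CF is an all-ones mask when C is set; ANDing with
     * ZF (nonzero iff Z clear) is nonzero exactly for "hi".  "ls" is
     * the same value tested with the inverted condition. */
    return ((0u - cf) & zf) != 0;
}

static bool cc_gt(uint32_t nf, uint32_t vf, uint32_t zf)
{
    /* gt: !Z && N == V.  Sign-propagate NF ^ VF (all-ones iff N != V),
     * then AND its complement with ZF, as the sari/andc pair does. */
    uint32_t ne_mask = (uint32_t)((int32_t)(nf ^ vf) >> 31);
    return (zf & ~ne_mask) != 0;
}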
static const uint8_t table_logic_cc[16] = {
@@ -839,7 +867,7 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
TCGv_i32 tmp;
- s->is_jmp = DISAS_UPDATE;
+ s->is_jmp = DISAS_JUMP;
if (s->thumb != (addr & 1)) {
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, addr & 1);
@@ -852,7 +880,7 @@ static inline void gen_bx_im(DisasContext *s, uint32_t addr)
/* Set PC and Thumb state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
{
- s->is_jmp = DISAS_UPDATE;
+ s->is_jmp = DISAS_JUMP;
tcg_gen_andi_i32(cpu_R[15], var, ~1);
tcg_gen_andi_i32(var, var, 1);
store_cpu_field(var, thumb);
@@ -883,6 +911,12 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
}
}
+#ifdef CONFIG_USER_ONLY
+#define IS_USER_ONLY 1
+#else
+#define IS_USER_ONLY 0
+#endif
+
/* Abstractions of "generate code to do a guest load/store for
* AArch32", where a vaddr is always 32 bits (and is zero
* extended if we're a 64 bit core) and data is also
@@ -892,74 +926,143 @@ static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
*/
#if TARGET_LONG_BITS == 32
-#define DO_GEN_LD(SUFF, OPC) \
-static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
+#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
+static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 addr, int index) \
{ \
- tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
-}
-
-#define DO_GEN_ST(SUFF, OPC) \
-static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
+ TCGMemOp opc = (OPC) | s->be_data; \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ TCGv addr_be = tcg_temp_new(); \
+ tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
+ tcg_gen_qemu_ld_i32(val, addr_be, index, opc); \
+ tcg_temp_free(addr_be); \
+ return; \
+ } \
+ tcg_gen_qemu_ld_i32(val, addr, index, opc); \
+}
+
+#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
+static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 addr, int index) \
{ \
- tcg_gen_qemu_st_i32(val, addr, index, OPC); \
-}
-
-static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
-{
- tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
+ TCGMemOp opc = (OPC) | s->be_data; \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ TCGv addr_be = tcg_temp_new(); \
+ tcg_gen_xori_i32(addr_be, addr, BE32_XOR); \
+ tcg_gen_qemu_st_i32(val, addr_be, index, opc); \
+ tcg_temp_free(addr_be); \
+ return; \
+ } \
+ tcg_gen_qemu_st_i32(val, addr, index, opc); \
+}
+
+static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 addr, int index)
+{
+ TCGMemOp opc = MO_Q | s->be_data;
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
}
-static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
+static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 addr, int index)
{
- tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
+ TCGMemOp opc = MO_Q | s->be_data;
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ return;
+ }
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
}
#else
-#define DO_GEN_LD(SUFF, OPC) \
-static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
+#define DO_GEN_LD(SUFF, OPC, BE32_XOR) \
+static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 addr, int index) \
{ \
+ TCGMemOp opc = (OPC) | s->be_data; \
TCGv addr64 = tcg_temp_new(); \
tcg_gen_extu_i32_i64(addr64, addr); \
- tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
+ } \
+ tcg_gen_qemu_ld_i32(val, addr64, index, opc); \
tcg_temp_free(addr64); \
}
-#define DO_GEN_ST(SUFF, OPC) \
-static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
+#define DO_GEN_ST(SUFF, OPC, BE32_XOR) \
+static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 addr, int index) \
{ \
+ TCGMemOp opc = (OPC) | s->be_data; \
TCGv addr64 = tcg_temp_new(); \
tcg_gen_extu_i32_i64(addr64, addr); \
- tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */ \
+ if (!IS_USER_ONLY && s->sctlr_b && BE32_XOR) { \
+ tcg_gen_xori_i64(addr64, addr64, BE32_XOR); \
+ } \
+ tcg_gen_qemu_st_i32(val, addr64, index, opc); \
tcg_temp_free(addr64); \
}
-static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
+static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 addr, int index)
{
+ TCGMemOp opc = MO_Q | s->be_data;
TCGv addr64 = tcg_temp_new();
tcg_gen_extu_i32_i64(addr64, addr);
- tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
+ tcg_gen_qemu_ld_i64(val, addr64, index, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
tcg_temp_free(addr64);
}
-static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
+static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 addr, int index)
{
+ TCGMemOp opc = MO_Q | s->be_data;
TCGv addr64 = tcg_temp_new();
tcg_gen_extu_i32_i64(addr64, addr);
- tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr64, index, opc);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr64, index, opc);
+ }
tcg_temp_free(addr64);
}
#endif
-DO_GEN_LD(8s, MO_SB)
-DO_GEN_LD(8u, MO_UB)
-DO_GEN_LD(16s, MO_TESW)
-DO_GEN_LD(16u, MO_TEUW)
-DO_GEN_LD(32u, MO_TEUL)
-DO_GEN_ST(8, MO_UB)
-DO_GEN_ST(16, MO_TEUW)
-DO_GEN_ST(32, MO_TEUL)
+DO_GEN_LD(8s, MO_SB, 3)
+DO_GEN_LD(8u, MO_UB, 3)
+DO_GEN_LD(16s, MO_SW, 2)
+DO_GEN_LD(16u, MO_UW, 2)
+DO_GEN_LD(32u, MO_UL, 0)
+/* 'a' variants include an alignment check */
+DO_GEN_LD(16ua, MO_UW | MO_ALIGN, 2)
+DO_GEN_LD(32ua, MO_UL | MO_ALIGN, 0)
+DO_GEN_ST(8, MO_UB, 3)
+DO_GEN_ST(16, MO_UW, 2)
+DO_GEN_ST(32, MO_UL, 0)
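
/* In BE32 mode (SCTLR.B set) the CPU presents a word-invariant
 * big-endian view: 32-bit accesses are unchanged, but sub-word accesses
 * hit different byte lanes, which the macros above implement by XORing
 * the low address bits -- 3 for bytes, 2 for halfwords, 0 for words --
 * while 64-bit accesses become two swapped words, i.e. a 32-bit rotate.
 * A standalone model of the adjustment (illustrative; these are not the
 * QEMU helpers):
 */
#include <stdint.h>

static uint32_t be32_adjust_addr(uint32_t addr, unsigned size_log2)
{
    switch (size_log2) {
    case 0:  return addr ^ 3;   /* matches BE32_XOR == 3 above */
    case 1:  return addr ^ 2;   /* matches BE32_XOR == 2 above */
    default: return addr;       /* words and wider: no change  */
    }
}

static uint64_t be32_adjust_u64(uint64_t val)
{
    return (val >> 32) | (val << 32);   /* the rotri by 32 */
}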
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
{
@@ -1031,7 +1134,7 @@ static void gen_exception_insn(DisasContext *s, int offset, int excp,
static inline void gen_lookup_tb(DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
- s->is_jmp = DISAS_UPDATE;
+ s->is_jmp = DISAS_JUMP;
}
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
@@ -1254,18 +1357,18 @@ VFP_GEN_FIX(ulto, )
static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
{
if (dp) {
- gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
+ gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
} else {
- gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
}
}
static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
{
if (dp) {
- gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
+ gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
} else {
- gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
+ gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
}
}
@@ -1557,7 +1660,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
} else {
tmp = tcg_temp_new_i32();
iwmmxt_load_reg(cpu_V0, rd);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
}
tcg_gen_andi_i32(tmp, tmp, mask);
tcg_gen_mov_i32(dest, tmp);
@@ -1581,9 +1684,9 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
rdhi = (insn >> 16) & 0xf;
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
iwmmxt_load_reg(cpu_V0, wrd);
- tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
} else { /* TMCRR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
iwmmxt_store_reg(cpu_V0, wrd);
@@ -1601,24 +1704,24 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
if (insn & ARM_CP_RW_BIT) {
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
iwmmxt_store_creg(wrd, tmp);
} else {
i = 1;
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WLDRD */
- gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
+ gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
i = 0;
} else { /* WLDRW wRd */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
}
} else {
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) { /* WLDRH */
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
} else { /* WLDRB */
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
}
}
if (i) {
@@ -1630,24 +1733,24 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
} else {
if ((insn >> 28) == 0xf) { /* WSTRW wCx */
tmp = iwmmxt_load_creg(wrd);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
} else {
gen_op_iwmmxt_movq_M0_wRn(wrd);
tmp = tcg_temp_new_i32();
if (insn & (1 << 8)) {
if (insn & (1 << 22)) { /* WSTRD */
- gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
+ gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
} else { /* WSTRW wRd */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
}
} else {
if (insn & (1 << 22)) { /* WSTRH */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
} else { /* WSTRB */
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
}
}
}
@@ -1946,7 +2049,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
switch ((insn >> 22) & 3) {
case 0:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext8s_i32(tmp, tmp);
} else {
@@ -1955,7 +2058,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
break;
case 1:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
if (insn & 8) {
tcg_gen_ext16s_i32(tmp, tmp);
} else {
@@ -1964,7 +2067,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
break;
case 2:
tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
- tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
break;
}
store_reg(s, rd, tmp);
@@ -2627,9 +2730,9 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
if (insn & ARM_CP_RW_BIT) { /* MRA */
iwmmxt_load_reg(cpu_V0, acc);
- tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
} else { /* MAR */
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
@@ -2712,15 +2815,15 @@ static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
TCGv_i32 tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
gen_neon_dup_u8(tmp, 0);
break;
case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
gen_neon_dup_low16(tmp);
break;
case 2:
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
break;
default: /* Avoid compiler warnings. */
abort();
@@ -2951,7 +3054,7 @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
} else {
gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
}
- tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+ tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
tcg_temp_free_i32(tcg_tmp);
tcg_temp_free_i64(tcg_res);
@@ -3046,7 +3149,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -4057,24 +4160,213 @@ static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val
return gen_set_psr(s, mask, spsr, tmp);
}
+static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
+ int *tgtmode, int *regno)
+{
+ /* Decode the r and sysm fields of MSR/MRS banked accesses into
+ * the target mode and register number, and identify the various
+ * unpredictable cases.
+ * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
+ * + executed in user mode
+ * + using R15 as the src/dest register
+ * + accessing an unimplemented register
+ * + accessing a register that's inaccessible at current PL/security state
+ * + accessing a register that you could access with a different insn
+ * We choose to UNDEF in all these cases.
+ * Since we don't know which of the various AArch32 modes we are in
+ * we have to defer some checks to runtime.
+ * Accesses to Monitor mode registers from Secure EL1 (which implies
+ * that EL3 is AArch64) must trap to EL3.
+ *
+ * If the access checks fail this function will emit code to take
+ * an exception and return false. Otherwise it will return true,
+ * and set *tgtmode and *regno appropriately.
+ */
+ int exc_target = default_exception_el(s);
+
+ /* These instructions are present only in ARMv8, or in ARMv7 with the
+ * Virtualization Extensions.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
+ !arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ goto undef;
+ }
+
+ if (IS_USER(s) || rn == 15) {
+ goto undef;
+ }
+
+ /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
+ * of registers into (r, sysm).
+ */
+ if (r) {
+ /* SPSRs for other modes */
+ switch (sysm) {
+ case 0xe: /* SPSR_fiq */
+ *tgtmode = ARM_CPU_MODE_FIQ;
+ break;
+ case 0x10: /* SPSR_irq */
+ *tgtmode = ARM_CPU_MODE_IRQ;
+ break;
+ case 0x12: /* SPSR_svc */
+ *tgtmode = ARM_CPU_MODE_SVC;
+ break;
+ case 0x14: /* SPSR_abt */
+ *tgtmode = ARM_CPU_MODE_ABT;
+ break;
+ case 0x16: /* SPSR_und */
+ *tgtmode = ARM_CPU_MODE_UND;
+ break;
+ case 0x1c: /* SPSR_mon */
+ *tgtmode = ARM_CPU_MODE_MON;
+ break;
+ case 0x1e: /* SPSR_hyp */
+ *tgtmode = ARM_CPU_MODE_HYP;
+ break;
+ default: /* unallocated */
+ goto undef;
+ }
+ /* We arbitrarily assign SPSR a register number of 16. */
+ *regno = 16;
+ } else {
+ /* general purpose registers for other modes */
+ switch (sysm) {
+ case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
+ *tgtmode = ARM_CPU_MODE_USR;
+ *regno = sysm + 8;
+ break;
+ case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
+ *tgtmode = ARM_CPU_MODE_FIQ;
+ *regno = sysm;
+ break;
+ case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
+ *tgtmode = ARM_CPU_MODE_IRQ;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
+ *tgtmode = ARM_CPU_MODE_SVC;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
+ *tgtmode = ARM_CPU_MODE_ABT;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
+ *tgtmode = ARM_CPU_MODE_UND;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
+ *tgtmode = ARM_CPU_MODE_MON;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
+ *tgtmode = ARM_CPU_MODE_HYP;
+ /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
+ *regno = sysm & 1 ? 13 : 17;
+ break;
+ default: /* unallocated */
+ goto undef;
+ }
+ }
+
+ /* Catch the 'accessing inaccessible register' cases we can detect
+ * at translate time.
+ */
+ switch (*tgtmode) {
+ case ARM_CPU_MODE_MON:
+ if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
+ goto undef;
+ }
+ if (s->current_el == 1) {
+ /* If we're in Secure EL1 (which implies that EL3 is AArch64)
+ * then accesses to Mon registers trap to EL3
+ */
+ exc_target = 3;
+ goto undef;
+ }
+ break;
+ case ARM_CPU_MODE_HYP:
+ /* Note that we can forbid accesses from EL2 here because they
+ * must be from Hyp mode itself
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
+ goto undef;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+
+undef:
+ /* If we get here then some access check did not pass */
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+ return false;
+}
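+
/* Worked example of the (r, sysm) table just decoded, per the v8 ARM
 * ARM F5.2.3 encoding.  This hypothetical standalone decoder covers
 * only the r == 0 (general-purpose register) half and a few rows;
 * mode numbers are the architectural ones (USR=0x10, FIQ=0x11,
 * IRQ=0x12, HYP=0x1a):
 */
#include <stdbool.h>

static bool decode_banked_gp(int sysm, int *mode, int *regno)
{
    if (sysm >= 0x00 && sysm <= 0x06) {             /* r8_usr..r14_usr */
        *mode = 0x10; *regno = sysm + 8; return true;
    }
    if (sysm >= 0x08 && sysm <= 0x0e) {             /* r8_fiq..r14_fiq */
        *mode = 0x11; *regno = sysm; return true;
    }
    switch (sysm) {
    case 0x10: *mode = 0x12; *regno = 14; return true;  /* r14_irq */
    case 0x11: *mode = 0x12; *regno = 13; return true;  /* r13_irq */
    case 0x1e: *mode = 0x1a; *regno = 17; return true;  /* elr_hyp */
    case 0x1f: *mode = 0x1a; *regno = 13; return true;  /* r13_hyp */
    default:   return false;    /* remaining rows omitted for brevity */
    }
}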
+
+static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
+{
+ TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
+ int tgtmode = 0, regno = 0;
+
+ if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
+ return;
+ }
+
+ /* Sync state because msr_banked() can raise exceptions */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
+ tcg_reg = load_reg(s, rn);
+ tcg_tgtmode = tcg_const_i32(tgtmode);
+ tcg_regno = tcg_const_i32(regno);
+ gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
+ tcg_temp_free_i32(tcg_tgtmode);
+ tcg_temp_free_i32(tcg_regno);
+ tcg_temp_free_i32(tcg_reg);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
+{
+ TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
+ int tgtmode = 0, regno = 0;
+
+ if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
+ return;
+ }
+
+ /* Sync state because mrs_banked() can raise exceptions */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
+ tcg_reg = tcg_temp_new_i32();
+ tcg_tgtmode = tcg_const_i32(tgtmode);
+ tcg_regno = tcg_const_i32(regno);
+ gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
+ tcg_temp_free_i32(tcg_tgtmode);
+ tcg_temp_free_i32(tcg_regno);
+ store_reg(s, rn, tcg_reg);
+ s->is_jmp = DISAS_UPDATE;
+}
+
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
TCGv_i32 tmp;
store_reg(s, 15, pc);
tmp = load_cpu_field(spsr);
- gen_set_cpsr(tmp, CPSR_ERET_MASK);
+ gen_helper_cpsr_write_eret(cpu_env, tmp);
tcg_temp_free_i32(tmp);
- s->is_jmp = DISAS_UPDATE;
+ s->is_jmp = DISAS_JUMP;
}
/* Generate a v6 exception return. Marks both values as dead. */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
- gen_set_cpsr(cpsr, CPSR_ERET_MASK);
+ gen_helper_cpsr_write_eret(cpu_env, cpsr);
tcg_temp_free_i32(cpsr);
store_reg(s, 15, pc);
- s->is_jmp = DISAS_UPDATE;
+ s->is_jmp = DISAS_JUMP;
}
static void gen_nop_hint(DisasContext *s, int val)
@@ -4368,7 +4660,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -4418,11 +4710,11 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
if (size == 3) {
tmp64 = tcg_temp_new_i64();
if (load) {
- gen_aa32_ld64(tmp64, addr, get_mem_index(s));
+ gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
neon_store_reg64(tmp64, rd);
} else {
neon_load_reg64(tmp64, rd);
- gen_aa32_st64(tmp64, addr, get_mem_index(s));
+ gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
}
tcg_temp_free_i64(tmp64);
tcg_gen_addi_i32(addr, addr, stride);
@@ -4431,21 +4723,21 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
if (size == 2) {
if (load) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
neon_store_reg(rd, pass, tmp);
} else {
tmp = neon_load_reg(rd, pass);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, stride);
} else if (size == 1) {
if (load) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
tcg_gen_shli_i32(tmp2, tmp2, 16);
tcg_gen_or_i32(tmp, tmp, tmp2);
@@ -4455,10 +4747,10 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
tmp = neon_load_reg(rd, pass);
tmp2 = tcg_temp_new_i32();
tcg_gen_shri_i32(tmp2, tmp, 16);
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
- gen_aa32_st16(tmp2, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_addi_i32(addr, addr, stride);
}
@@ -4467,7 +4759,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
TCGV_UNUSED_I32(tmp2);
for (n = 0; n < 4; n++) {
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, stride);
if (n == 0) {
tmp2 = tmp;
@@ -4487,7 +4779,7 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
} else {
tcg_gen_shri_i32(tmp, tmp2, n * 8);
}
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, stride);
}
@@ -4611,13 +4903,13 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
break;
default: /* Avoid compiler warnings. */
abort();
@@ -4635,13 +4927,13 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
tcg_gen_shri_i32(tmp, tmp, shift);
switch (size) {
case 0:
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
break;
}
tcg_temp_free_i32(tmp);
@@ -4683,7 +4975,7 @@ static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
switch (size) {
case 0: gen_helper_neon_narrow_u8(dest, src); break;
case 1: gen_helper_neon_narrow_u16(dest, src); break;
- case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+ case 2: tcg_gen_extrl_i64_i32(dest, src); break;
default: abort();
}
}
@@ -5106,7 +5398,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
*/
if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
return 0;
}
@@ -6254,7 +6546,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
break;
case 2:
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
@@ -6269,7 +6561,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
case 2:
tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
- tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
break;
default: abort();
}
@@ -7138,7 +7430,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
* call in order to handle c15_cpar.
*/
TCGv_ptr tmpptr;
- TCGv_i32 tcg_syn;
+ TCGv_i32 tcg_syn, tcg_isread;
uint32_t syndrome;
/* Note that since we are an implementation which takes an
@@ -7153,19 +7445,19 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
case 14:
if (is64) {
syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
- isread, s->thumb);
+ isread, false);
} else {
syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
- rt, isread, s->thumb);
+ rt, isread, false);
}
break;
case 15:
if (is64) {
syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
- isread, s->thumb);
+ isread, false);
} else {
syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
- rt, isread, s->thumb);
+ rt, isread, false);
}
break;
default:
@@ -7179,12 +7471,16 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
break;
}
+ gen_set_condexec(s);
gen_set_pc_im(s, s->pc - 4);
tmpptr = tcg_const_ptr(ri);
tcg_syn = tcg_const_i32(syndrome);
- gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
+ tcg_isread = tcg_const_i32(isread);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
+ tcg_isread);
tcg_temp_free_ptr(tmpptr);
tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_isread);
}
/* Handle special cases first */
@@ -7224,11 +7520,11 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
}
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
@@ -7334,11 +7630,11 @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
TCGv_i32 tmp;
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, val);
+ tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rlow, tmp);
tmp = tcg_temp_new_i32();
tcg_gen_shri_i64(val, val, 32);
- tcg_gen_trunc_i64_i32(tmp, val);
+ tcg_gen_extrl_i64_i32(tmp, val);
store_reg(s, rhigh, tmp);
}
@@ -7400,14 +7696,14 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16ua(s, tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32ua(s, tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -7418,7 +7714,7 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
store_reg(s, rt2, tmp3);
@@ -7469,14 +7765,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -7487,7 +7783,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp3, tmp2, get_mem_index(s));
tcg_temp_free_i32(tmp2);
tcg_gen_concat_i32_i64(val64, tmp, tmp3);
tcg_temp_free_i32(tmp3);
@@ -7502,14 +7798,14 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
tmp = load_reg(s, rt);
switch (size) {
case 0:
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
break;
case 1:
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
break;
case 2:
case 3:
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -7518,7 +7814,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
if (size == 3) {
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rt2);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[rd], 0);
@@ -7543,8 +7839,68 @@ static void gen_srs(DisasContext *s,
uint32_t mode, uint32_t amode, bool writeback)
{
int32_t offset;
- TCGv_i32 addr = tcg_temp_new_i32();
- TCGv_i32 tmp = tcg_const_i32(mode);
+ TCGv_i32 addr, tmp;
+ bool undef = false;
+
+ /* SRS is:
+ * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
+ * and specified mode is monitor mode
+ * - UNDEFINED in Hyp mode
+ * - UNPREDICTABLE in User or System mode
+ * - UNPREDICTABLE if the specified mode is:
+ * -- not implemented
+ * -- not a valid mode number
+ * -- a mode that's at a higher exception level
+ * -- Monitor, if we are Non-secure
+ * For the UNPREDICTABLE cases we choose to UNDEF.
+ */
+ if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+ return;
+ }
+
+ if (s->current_el == 0 || s->current_el == 2) {
+ undef = true;
+ }
+
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_FIQ:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_SYS:
+ break;
+ case ARM_CPU_MODE_HYP:
+ if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ undef = true;
+ }
+ break;
+ case ARM_CPU_MODE_MON:
+ /* No need to check specifically for "are we non-secure" because
+ * we've already made EL0 UNDEF and handled the trap for S-EL1;
+ * so if this isn't EL3 then we must be non-secure.
+ */
+ if (s->current_el != 3) {
+ undef = true;
+ }
+ break;
+ default:
+ undef = true;
+ }
+
+ if (undef) {
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+ return;
+ }
+
+ addr = tcg_temp_new_i32();
+ tmp = tcg_const_i32(mode);
+ /* get_r13_banked() will raise an exception if called from System mode */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
switch (amode) {
@@ -7565,11 +7921,11 @@ static void gen_srs(DisasContext *s,
}
tcg_gen_addi_i32(addr, addr, offset);
tmp = load_reg(s, 14);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tmp = load_cpu_field(spsr);
tcg_gen_addi_i32(addr, addr, 4);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
if (writeback) {
switch (amode) {
@@ -7594,6 +7950,7 @@ static void gen_srs(DisasContext *s,
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
+ s->is_jmp = DISAS_UPDATE;
}
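
/* The translate-time SRS checks above, condensed into one predicate
 * (a minimal sketch with architectural mode numbers; the Secure-EL1 +
 * Monitor case traps to EL3 before this point, and System mode is
 * caught at runtime by get_r13_banked, so neither appears here):
 */
#include <stdbool.h>
#include <stdint.h>

static bool srs_undefs(int cur_el, bool have_el2, uint32_t mode)
{
    if (cur_el == 0 || cur_el == 2) {   /* User mode, or Hyp mode */
        return true;
    }
    switch (mode) {
    case 0x10: case 0x11: case 0x12: case 0x13:   /* usr fiq irq svc */
    case 0x17: case 0x1b: case 0x1f:              /* abt und sys */
        return false;
    case 0x1a:                                    /* hyp */
        return cur_el == 1 || !have_el2;
    case 0x16:                                    /* mon */
        return cur_el != 3;
    default:
        return true;                              /* invalid mode */
    }
}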
static void disas_arm_insn(DisasContext *s, unsigned int insn)
@@ -7675,10 +8032,9 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
if ((insn & 0x0ffffdff) == 0x01010000) {
ARCH(6);
/* setend */
- if (((insn >> 9) & 1) != s->bswap_code) {
- /* Dynamic endianness switching not implemented. */
- qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
- goto illegal_op;
+ if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
+ gen_helper_setend(cpu_env);
+ s->is_jmp = DISAS_UPDATE;
}
return;
} else if ((insn & 0x0fffff00) == 0x057ff000) {
@@ -7689,18 +8045,21 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
return;
case 4: /* dsb */
case 5: /* dmb */
- case 6: /* isb */
ARCH(7);
/* We don't emulate caches so these are a no-op. */
return;
+ case 6: /* isb */
+ /* We need to break the TB after this insn to execute
+ * self-modifying code correctly and also to take
+ * any pending interrupts immediately.
+ */
+ gen_lookup_tb(s);
+ return;
default:
goto illegal_op;
}
} else if ((insn & 0x0e5fffe0) == 0x084d0500) {
/* srs */
- if (IS_USER(s)) {
- goto illegal_op;
- }
ARCH(6);
gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
return;
@@ -7724,10 +8083,10 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tcg_gen_addi_i32(addr, addr, offset);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
if (insn & (1 << 21)) {
/* Base writeback. */
switch (i) {
@@ -7852,7 +8211,26 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
sh = (insn >> 4) & 0xf;
rm = insn & 0xf;
switch (sh) {
- case 0x0: /* move program status register */
+ case 0x0: /* MSR, MRS */
+ if (insn & (1 << 9)) {
+ /* MSR (banked) and MRS (banked) */
+ int sysm = extract32(insn, 16, 4) |
+ (extract32(insn, 8, 1) << 4);
+ int r = extract32(insn, 22, 1);
+
+ if (op1 & 1) {
+ /* MSR (banked) */
+ gen_msr_banked(s, r, sysm, rm);
+ } else {
+ /* MRS (banked) */
+ int rd = extract32(insn, 12, 4);
+
+ gen_mrs_banked(s, r, sysm, rd);
+ }
+ break;
+ }
+
+ /* MSR, MRS (for PSRs) */
if (op1 & 1) {
/* PSR = reg */
tmp = load_reg(s, rm);
@@ -8013,7 +8391,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
@@ -8343,13 +8721,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = tcg_temp_new_i32();
switch (op1) {
case 0: /* lda */
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr,
+ get_mem_index(s));
break;
case 2: /* ldab */
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr,
+ get_mem_index(s));
break;
case 3: /* ldah */
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr,
+ get_mem_index(s));
break;
default:
abort();
@@ -8360,13 +8741,16 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = load_reg(s, rm);
switch (op1) {
case 0: /* stl */
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr,
+ get_mem_index(s));
break;
case 2: /* stlb */
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr,
+ get_mem_index(s));
break;
case 3: /* stlh */
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr,
+ get_mem_index(s));
break;
default:
abort();
@@ -8421,11 +8805,11 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = load_reg(s, rm);
tmp2 = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp2, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
} else {
- gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(addr);
@@ -8460,20 +8844,20 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
if (!load) {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd + 1);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
} else {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
rd++;
}
address_offset = -4;
@@ -8482,25 +8866,25 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
tmp = tcg_temp_new_i32();
switch (sh) {
case 1:
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
break;
case 2:
- gen_aa32_ld8s(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
break;
default:
case 3:
- gen_aa32_ld16s(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
break;
}
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* Perform base writeback before the loaded value to
ensure correct behavior with overlapping index registers.
- ldrd with base writeback is is undefined if the
+ ldrd with base writeback is undefined if the
destination and index registers overlap. */
if (!(insn & (1 << 24))) {
gen_add_datah_offset(s, insn, address_offset, addr);
@@ -8679,7 +9063,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
break;
@@ -8848,17 +9232,17 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
/* load */
tmp = tcg_temp_new_i32();
if (insn & (1 << 22)) {
- gen_aa32_ld8u(tmp, tmp2, i);
+ gen_aa32_ld8u(s, tmp, tmp2, i);
} else {
- gen_aa32_ld32u(tmp, tmp2, i);
+ gen_aa32_ld32u(s, tmp, tmp2, i);
}
} else {
/* store */
tmp = load_reg(s, rd);
if (insn & (1 << 22)) {
- gen_aa32_st8(tmp, tmp2, i);
+ gen_aa32_st8(s, tmp, tmp2, i);
} else {
- gen_aa32_st32(tmp, tmp2, i);
+ gen_aa32_st32(s, tmp, tmp2, i);
}
tcg_temp_free_i32(tmp);
}
@@ -8931,7 +9315,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
if (is_load) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
if (user) {
tmp2 = tcg_const_i32(i);
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
@@ -8958,7 +9342,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
} else {
tmp = load_reg(s, i);
}
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
j++;
@@ -8996,9 +9380,9 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
- gen_set_cpsr(tmp, CPSR_ERET_MASK);
+ gen_helper_cpsr_write_eret(cpu_env, tmp);
tcg_temp_free_i32(tmp);
- s->is_jmp = DISAS_UPDATE;
+ s->is_jmp = DISAS_JUMP;
}
}
break;
@@ -9188,7 +9572,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
/* Fall through to 32-bit decode. */
}
- insn = arm_lduw_code(env, s->pc, s->bswap_code);
+ insn = arm_lduw_code(env, s->pc, s->sctlr_b);
s->pc += 2;
insn |= (uint32_t)insn_hw1 << 16;
@@ -9225,20 +9609,20 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (insn & (1 << 20)) {
/* ldrd */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
store_reg(s, rs, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* strd */
tmp = load_reg(s, rs);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
if (insn & (1 << 21)) {
@@ -9276,11 +9660,11 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_add_i32(addr, addr, tmp);
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
} else { /* tbb */
tcg_temp_free_i32(tmp);
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
}
tcg_temp_free_i32(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
@@ -9317,13 +9701,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = tcg_temp_new_i32();
switch (op) {
case 0: /* ldab */
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
break;
case 1: /* ldah */
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
break;
case 2: /* lda */
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -9333,13 +9717,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = load_reg(s, rs);
switch (op) {
case 0: /* stlb */
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
break;
case 1: /* stlh */
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
break;
case 2: /* stl */
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
break;
default:
abort();
@@ -9367,10 +9751,10 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_addi_i32(addr, addr, -8);
/* Load PC into tmp and CPSR into tmp2. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
if (insn & (1 << 21)) {
/* Base writeback. */
if (insn & (1 << 24)) {
@@ -9409,7 +9793,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (insn & (1 << 20)) {
/* Load. */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
if (i == 15) {
gen_bx(s, tmp);
} else if (i == rn) {
@@ -9421,7 +9805,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
} else {
/* Store. */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
@@ -9749,7 +10133,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if (rs != 15)
{
@@ -9773,7 +10157,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = tcg_temp_new_i32();
- tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
break;
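/* Editor's note: tcg_gen_extrl_i64_i32 is the newer name for the old
 * tcg_gen_trunc_i64_i32 -- it extracts the low 32 bits of a 64-bit
 * temporary (tcg_gen_extrh_i64_i32 takes the high half). Semantically:
 *
 *     dst32 = (uint32_t)src64;
 */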
case 7: /* Unsigned sum of absolute differences. */
@@ -9957,6 +10341,18 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
if (arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
}
+
+ if (extract32(insn, 5, 1)) {
+ /* MSR (banked) */
+ int sysm = extract32(insn, 8, 4) |
+ (extract32(insn, 4, 1) << 4);
+ int r = op & 1;
+
+ gen_msr_banked(s, r, sysm, rm);
+ break;
+ }
+
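+ /* Editor's note: SYSm for the banked MSR/MRS forms is assembled from
+  * two fields of the encoding -- four low bits from the register-number
+  * field plus one insn bit as SYSm[4] -- and 'r' then selects the SPSR
+  * (r == 1) rather than a banked R<n>. For the MSR form above, in
+  * encoding-level pseudocode:
+  *
+  *     sysm = insn[11:8] | (insn[4] << 4);
+  */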
+ /* MSR (for PSRs) */
tmp = load_reg(s, rn);
if (gen_set_psr(s,
msr_mask(s, (insn >> 8) & 0xf, op == 1),
@@ -9999,9 +10395,16 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
break;
case 4: /* dsb */
case 5: /* dmb */
- case 6: /* isb */
/* These execute as NOPs. */
break;
+ case 6: /* isb */
+ /* We need to break the TB after this insn
+ * to execute self-modifying code correctly
+ * and also to take any pending interrupts
+ * immediately.
+ */
+ gen_lookup_tb(s);
+ break;
default:
goto illegal_op;
}
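/* Editor's note: gen_lookup_tb() roughly stores the next PC and forces
 * a non-chained TB exit (sketch, following this file's conventions):
 *
 *     tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
 *     s->is_jmp = DISAS_JUMP;
 *
 * Returning to the main loop after ISB is what makes freshly written
 * code and pending interrupts visible at the architectural point.
 */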
@@ -10022,7 +10425,17 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
gen_exception_return(s, tmp);
break;
- case 6: /* mrs cpsr. */
+ case 6: /* MRS */
+ if (extract32(insn, 5, 1)) {
+ /* MRS (banked) */
+ int sysm = extract32(insn, 16, 4) |
+ (extract32(insn, 4, 1) << 4);
+
+ gen_mrs_banked(s, 0, sysm, rd);
+ break;
+ }
+
+ /* mrs cpsr */
tmp = tcg_temp_new_i32();
if (arm_dc_feature(s, ARM_FEATURE_M)) {
addr = tcg_const_i32(insn & 0xff);
@@ -10033,7 +10446,17 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
}
store_reg(s, rd, tmp);
break;
- case 7: /* mrs spsr. */
+ case 7: /* MRS */
+ if (extract32(insn, 5, 1)) {
+ /* MRS (banked) */
+ int sysm = extract32(insn, 16, 4) |
+ (extract32(insn, 4, 1) << 4);
+
+ gen_mrs_banked(s, 1, sysm, rd);
+ break;
+ }
+
+ /* mrs spsr */
/* Not accessible in user mode. */
if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
goto illegal_op;
@@ -10344,19 +10767,19 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = tcg_temp_new_i32();
switch (op) {
case 0:
- gen_aa32_ld8u(tmp, addr, memidx);
+ gen_aa32_ld8u(s, tmp, addr, memidx);
break;
case 4:
- gen_aa32_ld8s(tmp, addr, memidx);
+ gen_aa32_ld8s(s, tmp, addr, memidx);
break;
case 1:
- gen_aa32_ld16u(tmp, addr, memidx);
+ gen_aa32_ld16u(s, tmp, addr, memidx);
break;
case 5:
- gen_aa32_ld16s(tmp, addr, memidx);
+ gen_aa32_ld16s(s, tmp, addr, memidx);
break;
case 2:
- gen_aa32_ld32u(tmp, addr, memidx);
+ gen_aa32_ld32u(s, tmp, addr, memidx);
break;
default:
tcg_temp_free_i32(tmp);
@@ -10373,13 +10796,13 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
tmp = load_reg(s, rs);
switch (op) {
case 0:
- gen_aa32_st8(tmp, addr, memidx);
+ gen_aa32_st8(s, tmp, addr, memidx);
break;
case 1:
- gen_aa32_st16(tmp, addr, memidx);
+ gen_aa32_st16(s, tmp, addr, memidx);
break;
case 2:
- gen_aa32_st32(tmp, addr, memidx);
+ gen_aa32_st32(s, tmp, addr, memidx);
break;
default:
tcg_temp_free_i32(tmp);
@@ -10423,7 +10846,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
}
}
- insn = arm_lduw_code(env, s->pc, s->bswap_code);
+ insn = arm_lduw_code(env, s->pc, s->sctlr_b);
s->pc += 2;
switch (insn >> 12) {
@@ -10516,7 +10939,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
addr = tcg_temp_new_i32();
tcg_gen_movi_i32(addr, val);
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(addr);
store_reg(s, rd, tmp);
break;
@@ -10719,28 +11142,28 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
switch (op) {
case 0: /* str */
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
break;
case 1: /* strh */
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
break;
case 2: /* strb */
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
break;
case 3: /* ldrsb */
- gen_aa32_ld8s(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
break;
case 4: /* ldr */
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
break;
case 5: /* ldrh */
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
break;
case 6: /* ldrb */
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
break;
case 7: /* ldrsh */
- gen_aa32_ld16s(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
break;
}
if (op >= 3) { /* load */
@@ -10762,12 +11185,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10784,12 +11207,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld8u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st8(tmp, addr, get_mem_index(s));
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10806,12 +11229,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld16u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st16(tmp, addr, get_mem_index(s));
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10827,12 +11250,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
store_reg(s, rd, tmp);
} else {
/* store */
tmp = load_reg(s, rd);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_temp_free_i32(addr);
@@ -10900,12 +11323,12 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* pop */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
store_reg(s, i, tmp);
} else {
/* push */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address. */
@@ -10917,13 +11340,13 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* pop pc */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
/* don't set the pc until the rest of the instruction
has completed */
} else {
/* push lr */
tmp = load_reg(s, 14);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
tcg_gen_addi_i32(addr, addr, 4);
@@ -10994,10 +11417,9 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
case 2:
/* setend */
ARCH(6);
- if (((insn >> 3) & 1) != s->bswap_code) {
- /* Dynamic endianness switching not implemented. */
- qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
- goto illegal_op;
+ if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
+ gen_helper_setend(cpu_env);
+ s->is_jmp = DISAS_UPDATE;
}
break;
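/* Editor's note: SETEND is now implemented instead of rejected as
 * unimplemented. The helper presumably just flips CPSR.E (a sketch,
 * not necessarily the exact upstream body):
 *
 *     void HELPER(setend)(CPUARMState *env)
 *     {
 *         env->uncached_cpsr ^= CPSR_E;
 *     }
 *
 * DISAS_UPDATE then ends the TB, since be_data is baked into TB flags.
 */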
case 3:
@@ -11053,7 +11475,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
if (insn & (1 << 11)) {
/* load */
tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, addr, get_mem_index(s));
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
if (i == rn) {
loaded_var = tmp;
} else {
@@ -11062,7 +11484,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
} else {
/* store */
tmp = load_reg(s, i);
- gen_aa32_st32(tmp, addr, get_mem_index(s));
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
tcg_temp_free_i32(tmp);
}
/* advance to the next address */
@@ -11135,22 +11557,46 @@ undef:
default_exception_el(s));
}
-/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
- basic block 'tb'. If search_pc is TRUE, also generate PC
- information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(ARMCPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
+{
+ /* Return true if the insn at dc->pc might cross a page boundary.
+ * (False positives are OK, false negatives are not.)
+ */
+ uint16_t insn;
+
+ if ((s->pc & 3) == 0) {
+ /* At a 4-aligned address we can't be crossing a page */
+ return false;
+ }
+
+ /* This must be a Thumb insn */
+ insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+
+ if ((insn >> 11) >= 0x1d) {
+ /* Top five bits 0b11101 / 0b11110 / 0b11111: this is the
+ * first half of a 32-bit Thumb insn. Thumb-1 cores might
+ * end up actually treating this as two 16-bit insns (see the
+ * code at the start of disas_thumb2_insn()) but we don't bother
+ * to check for that as it is unlikely, and false positives here
+ * are harmless.
+ */
+ return true;
+ }
+ /* Definitely a 16-bit insn, can't be crossing a page. */
+ return false;
+}
+
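+/* Editor's note: the top-five-bits test above in isolation -- 16-bit
+ * Thumb encodings never begin with 0b11101, 0b11110 or 0b11111 (values
+ * 0x1d..0x1f when insn[15:11] is read as a number), so:
+ *
+ *     static bool is_first_half_of_t32(uint16_t hw)  // hypothetical name
+ *     {
+ *         return (hw >> 11) >= 0x1d;
+ *     }
+ */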
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
- CPUBreakpoint *bp;
- int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
int num_insns;
int max_insns;
+ bool end_of_page;
/* generate intermediate code */
@@ -11158,7 +11604,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* the A32/T32 complexity to do with conditional execution/IT blocks/etc.
*/
if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
- gen_intermediate_code_internal_a64(cpu, tb, search_pc);
+ gen_intermediate_code_a64(cpu, tb);
return;
}
@@ -11172,9 +11618,14 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
dc->condjmp = 0;
dc->aarch64 = 0;
- dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
- dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
+ dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
+ dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
@@ -11220,11 +11671,14 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
/* FIXME: cpu_M0 can probably be the same as cpu_V0. */
cpu_M0 = tcg_temp_new_i64();
next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
- lj = -1;
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0)
+ if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
gen_tb_start(tb);
@@ -11250,10 +11704,9 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* (3) if we leave the TB unexpectedly (eg a data abort on a load)
* then the CPUARMState will be wrong and we need to reset it.
* This is handled in the same way as restoration of the
- * PC in these situations: we will be called again with search_pc=1
- * and generate a mapping of the condexec bits for each PC in
- * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
- * this to restore the condexec bits.
+ * PC in these situations; we save the value of the condexec bits
+ * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
+ * then uses this to restore them after an exception.
*
* Note that there are no instructions which can read the condexec
* bits, and none which can write non-static values to them, so
@@ -11270,13 +11723,17 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
store_cpu_field(tmp, condexec_bits);
}
do {
+ tcg_gen_insn_start(dc->pc,
+ (dc->condexec_cond << 4) | (dc->condexec_mask >> 1));
+ num_insns++;
+
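+ /* Editor's note: the two words recorded by tcg_gen_insn_start() here
+  * are the ones handed back as data[0]/data[1] in restore_state_to_opc()
+  * at the bottom of this file:
+  *
+  *     env->regs[15]      = data[0];   // the PC
+  *     env->condexec_bits = data[1];   // IT-block state
+  */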
#ifdef CONFIG_USER_ONLY
/* Intercept jump to the magic kernel page. */
if (dc->pc >= 0xffff0000) {
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_KERNEL_TRAP);
- dc->is_jmp = DISAS_UPDATE;
+ dc->is_jmp = DISAS_EXC;
break;
}
#else
@@ -11284,40 +11741,40 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
/* We always get here via a jump, so know we are not in a
conditional execution block. */
gen_exception_internal(EXCP_EXCEPTION_EXIT);
- dc->is_jmp = DISAS_UPDATE;
+ dc->is_jmp = DISAS_EXC;
break;
}
#endif
if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ CPUBreakpoint *bp;
QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
if (bp->pc == dc->pc) {
- gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
- /* Advance PC so that clearing the breakpoint will
- invalidate this TB. */
- dc->pc += 2;
- goto done_generating;
+ if (bp->flags & BP_CPU) {
+ gen_set_condexec(dc);
+ gen_set_pc_im(dc, dc->pc);
+ gen_helper_check_breakpoints(cpu_env);
+ /* End the TB early; it's likely not going to be executed */
+ dc->is_jmp = DISAS_UPDATE;
+ } else {
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be
+ included in [tb->pc, tb->pc + tb->size) in order
+ for it to be properly cleared -- thus we
+ increment the PC here so that the logic setting
+ tb->size below does the right thing. */
+ /* TODO: Advance PC by correct instruction length to
+ * avoid disassembler error messages */
+ dc->pc += 2;
+ goto done_generating;
+ }
+ break;
}
}
}
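/* Editor's note: BP_CPU marks guest-architectural breakpoints, which
 * must be re-checked against the debug registers at execution time;
 * host-side GDB breakpoints keep the old direct EXCP_DEBUG path. A
 * hedged sketch of the helper (both callee names hypothetical):
 *
 *     void HELPER(check_breakpoints)(CPUARMState *env)
 *     {
 *         if (cpu_breakpoint_matches(env)) {          // hypothetical
 *             raise_exception_internal(env, EXCP_DEBUG);  // hypothetical
 *         }
 *     }
 */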
- if (search_pc) {
- j = tcg_op_buf_count();
- if (lj < j) {
- lj++;
- while (lj < j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- tcg_ctx.gen_opc_pc[lj] = dc->pc;
- gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
- tcg_ctx.gen_opc_instr_start[lj] = 1;
- tcg_ctx.gen_opc_icount[lj] = num_insns;
- }
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
gen_io_start();
-
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(dc->pc);
}
if (dc->ss_active && !dc->pstate_ss) {
@@ -11331,7 +11788,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* "did not step an insn" case, and so the syndrome ISV and EX
* bits should be zero.
*/
- assert(num_insns == 0);
+ assert(num_insns == 1);
gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
default_exception_el(dc));
goto done_generating;
@@ -11348,7 +11805,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
}
}
} else {
- unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
+ unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
dc->pc += 4;
disas_arm_insn(dc, insn);
}
@@ -11367,12 +11824,24 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
* Otherwise the subsequent code could get translated several times.
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
- num_insns ++;
+
+ /* We want to stop the TB if the next insn starts in a new page,
+ * or if it spans between this page and the next. This means that
+ * if we're looking at the last halfword in the page we need to
+ * see if it's a 16-bit Thumb insn (which will fit in this TB)
+ * or a 32-bit Thumb insn (which won't).
+ * This is to avoid generating a silly TB with a single 16-bit insn
+ * in it at the end of this page (which would execute correctly
+ * but isn't very efficient).
+ */
+ end_of_page = (dc->pc >= next_page_start) ||
+ ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
+
} while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
- dc->pc < next_page_start &&
+ !end_of_page &&
num_insns < max_insns);
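/* Editor's note: a worked example of the loop condition, assuming a 4K
 * TARGET_PAGE_SIZE: with next_page_start = 0x1000, a Thumb insn about
 * to be decoded at 0x0ffe passes the (dc->pc >= next_page_start - 3)
 * test, and insn_crosses_page() then decides whether its second
 * halfword would land at 0x1000, i.e. in the next page.
 */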
if (tb->cflags & CF_LAST_IO) {
@@ -11388,47 +11857,45 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
instruction was a conditional branch or trap, and the PC has
already been written. */
if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
- /* Make sure the pc is updated, and raise a debug exception. */
- if (dc->condjmp) {
- gen_set_condexec(dc);
- if (dc->is_jmp == DISAS_SWI) {
- gen_ss_advance(dc);
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
- default_exception_el(dc));
- } else if (dc->is_jmp == DISAS_HVC) {
- gen_ss_advance(dc);
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
- } else if (dc->is_jmp == DISAS_SMC) {
- gen_ss_advance(dc);
- gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
- } else if (dc->ss_active) {
- gen_step_complete_exception(dc);
- } else {
- gen_exception_internal(EXCP_DEBUG);
- }
- gen_set_label(dc->condlabel);
- }
- if (dc->condjmp || !dc->is_jmp) {
- gen_set_pc_im(dc, dc->pc);
- dc->condjmp = 0;
- }
+ /* Unconditional and "condition passed" instruction codepath. */
gen_set_condexec(dc);
- if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
+ switch (dc->is_jmp) {
+ case DISAS_SWI:
gen_ss_advance(dc);
gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
default_exception_el(dc));
- } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
+ break;
+ case DISAS_HVC:
gen_ss_advance(dc);
gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
- } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
+ break;
+ case DISAS_SMC:
gen_ss_advance(dc);
gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
- } else if (dc->ss_active) {
- gen_step_complete_exception(dc);
- } else {
- /* FIXME: Single stepping a WFI insn will not halt
- the CPU. */
- gen_exception_internal(EXCP_DEBUG);
+ break;
+ case DISAS_NEXT:
+ case DISAS_UPDATE:
+ gen_set_pc_im(dc, dc->pc);
+ /* fall through */
+ default:
+ if (dc->ss_active) {
+ gen_step_complete_exception(dc);
+ } else {
+ /* FIXME: Single stepping a WFI insn will not halt
+ the CPU. */
+ gen_exception_internal(EXCP_DEBUG);
+ }
+ }
+ if (dc->condjmp) {
+ /* "Condition failed" instruction codepath. */
+ gen_set_label(dc->condlabel);
+ gen_set_condexec(dc);
+ gen_set_pc_im(dc, dc->pc);
+ if (dc->ss_active) {
+ gen_step_complete_exception(dc);
+ } else {
+ gen_exception_internal(EXCP_DEBUG);
+ }
}
} else {
/* While branches must always occur at the end of an IT block,
@@ -11444,9 +11911,11 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
case DISAS_NEXT:
gen_goto_tb(dc, 1, dc->pc);
break;
- default:
- case DISAS_JUMP:
case DISAS_UPDATE:
+ gen_set_pc_im(dc, dc->pc);
+ /* fall through */
+ case DISAS_JUMP:
+ default:
/* indicate that the hash table must be used to find the next TB */
tcg_gen_exit_tb(0);
break;
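/* Editor's note: the cleanup dispatch above now distinguishes:
 *   DISAS_NEXT   -> chain to the next TB via gen_goto_tb()
 *   DISAS_UPDATE -> PC variable is stale: write it, then exit_tb(0)
 *   DISAS_JUMP   -> PC already written by the insn: just exit_tb(0)
 */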
@@ -11489,33 +11958,17 @@ done_generating:
gen_tb_end(tb, num_insns);
#ifdef DEBUG_DISAS
- if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
+ qemu_log_in_addr_range(pc_start)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
log_target_disas(cs, pc_start, dc->pc - pc_start,
- dc->thumb | (dc->bswap_code << 1));
+ dc->thumb | (dc->sctlr_b << 1));
qemu_log("\n");
}
#endif
- if (search_pc) {
- j = tcg_op_buf_count();
- lj++;
- while (lj <= j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- } else {
- tb->size = dc->pc - pc_start;
- tb->icount = num_insns;
- }
-}
-
-void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
-{
- gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
-}
-
-void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
-{
- gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
}
static const char *cpu_mode_names[16] = {
@@ -11530,6 +11983,7 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
CPUARMState *env = &cpu->env;
int i;
uint32_t psr;
+ const char *ns_status;
if (is_a64(env)) {
aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
@@ -11544,13 +11998,22 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
cpu_fprintf(f, " ");
}
psr = cpsr_read(env);
- cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ (psr & CPSR_M) != ARM_CPU_MODE_MON) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
psr,
psr & (1 << 31) ? 'N' : '-',
psr & (1 << 30) ? 'Z' : '-',
psr & (1 << 29) ? 'C' : '-',
psr & (1 << 28) ? 'V' : '-',
psr & CPSR_T ? 'T' : 'A',
+ ns_status,
cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
if (flags & CPU_DUMP_FPU) {
@@ -11572,13 +12035,14 @@ void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
}
}
-void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
+ target_ulong *data)
{
if (is_a64(env)) {
- env->pc = tcg_ctx.gen_opc_pc[pc_pos];
+ env->pc = data[0];
env->condexec_bits = 0;
} else {
- env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
- env->condexec_bits = gen_opc_condexec_bits[pc_pos];
+ env->regs[15] = data[0];
+ env->condexec_bits = data[1];
}
}
diff --git a/qemu/target-arm/translate.h b/qemu/target-arm/translate.h
index 9ab978fb7..6a18d7bad 100644
--- a/qemu/target-arm/translate.h
+++ b/qemu/target-arm/translate.h
@@ -16,14 +16,16 @@ typedef struct DisasContext {
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
- int bswap_code;
+ int sctlr_b;
+ TCGMemOp be_data;
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
bool ns; /* Use non-secure CPREG bank on access */
int fp_excp_el; /* FP exception EL or 0 if enabled */
- bool el3_is_aa64; /* Flag indicating whether EL3 is AArch64 or not */
+ /* Flag indicating that exceptions from secure mode are routed to EL3. */
+ bool secure_routed_to_el3;
bool vfp_enabled; /* FP enabled via FPSCR.EN */
int vec_len;
int vec_stride;
@@ -62,7 +64,21 @@ typedef struct DisasContext {
TCGv_i64 tmp_a64[TMP_A64_MAX];
} DisasContext;
-extern TCGv_ptr cpu_env;
+typedef struct DisasCompare {
+ TCGCond cond;
+ TCGv_i32 value;
+ bool value_global;
+} DisasCompare;
+
+/* Share the TCG temporaries common between the 32-bit and 64-bit modes. */
+extern TCGv_env cpu_env;
+extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
+extern TCGv_i64 cpu_exclusive_addr;
+extern TCGv_i64 cpu_exclusive_val;
+#ifdef CONFIG_USER_ONLY
+extern TCGv_i64 cpu_exclusive_test;
+extern TCGv_i32 cpu_exclusive_info;
+#endif
static inline int arm_dc_feature(DisasContext *dc, int feature)
{
@@ -84,7 +100,7 @@ static inline int default_exception_el(DisasContext *s)
* exceptions can only be routed to ELs above 1, so we target the higher of
* 1 or the current EL.
*/
- return (s->mmu_idx == ARMMMUIdx_S1SE0 && !s->el3_is_aa64)
+ return (s->mmu_idx == ARMMMUIdx_S1SE0 && s->secure_routed_to_el3)
? 3 : MAX(1, s->current_el);
}
@@ -107,9 +123,7 @@ static inline int default_exception_el(DisasContext *s)
#ifdef TARGET_AARCH64
void a64_translate_init(void);
-void gen_intermediate_code_internal_a64(ARMCPU *cpu,
- TranslationBlock *tb,
- bool search_pc);
+void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb);
void gen_a64_set_pc_im(uint64_t val);
void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags);
@@ -118,9 +132,7 @@ static inline void a64_translate_init(void)
{
}
-static inline void gen_intermediate_code_internal_a64(ARMCPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+static inline void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
{
}
@@ -135,6 +147,9 @@ static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
}
#endif
+void arm_test_cc(DisasCompare *cmp, int cc);
+void arm_free_cc(DisasCompare *cmp);
+void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
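/* Editor's note: a hedged usage sketch for the DisasCompare API -- this
 * is roughly how arm_gen_test_cc() can be composed from the three new
 * entry points (illustrative, not necessarily the upstream body):
 *
 *     DisasCompare cmp;
 *     arm_test_cc(&cmp, cc);
 *     tcg_gen_brcondi_i32(cmp.cond, cmp.value, 0, label);
 *     arm_free_cc(&cmp);
 */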
#endif /* TARGET_ARM_TRANSLATE_H */