path: root/qemu/target-ppc
Diffstat (limited to 'qemu/target-ppc')
-rw-r--r--  qemu/target-ppc/Makefile.objs          4
-rw-r--r--  qemu/target-ppc/arch_dump.c            7
-rw-r--r--  qemu/target-ppc/cpu-models.c          16
-rw-r--r--  qemu/target-ppc/cpu-models.h           6
-rw-r--r--  qemu/target-ppc/cpu-qom.h              4
-rw-r--r--  qemu/target-ppc/cpu.h                152
-rw-r--r--  qemu/target-ppc/dfp_helper.c          22
-rw-r--r--  qemu/target-ppc/excp_helper.c         60
-rw-r--r--  qemu/target-ppc/fpu_helper.c          23
-rw-r--r--  qemu/target-ppc/gdbstub.c             12
-rw-r--r--  qemu/target-ppc/helper.h               1
-rw-r--r--  qemu/target-ppc/int_helper.c          20
-rw-r--r--  qemu/target-ppc/kvm-stub.c             1
-rw-r--r--  qemu/target-ppc/kvm.c                537
-rw-r--r--  qemu/target-ppc/kvm_ppc.c             41
-rw-r--r--  qemu/target-ppc/kvm_ppc.h             66
-rw-r--r--  qemu/target-ppc/machine.c             25
-rw-r--r--  qemu/target-ppc/mem_helper.c           7
-rw-r--r--  qemu/target-ppc/mfrom_table_gen.c      3
-rw-r--r--  qemu/target-ppc/misc_helper.c          1
-rw-r--r--  qemu/target-ppc/mmu-hash32.c          82
-rw-r--r--  qemu/target-ppc/mmu-hash32.h          30
-rw-r--r--  qemu/target-ppc/mmu-hash64.c         363
-rw-r--r--  qemu/target-ppc/mmu-hash64.h          37
-rw-r--r--  qemu/target-ppc/mmu_helper.c         132
-rw-r--r--  qemu/target-ppc/monitor.c            147
-rw-r--r--  qemu/target-ppc/timebase_helper.c     11
-rw-r--r--  qemu/target-ppc/translate.c          259
-rw-r--r--  qemu/target-ppc/translate_init.c     459
-rw-r--r--  qemu/target-ppc/user_only_helper.c     1
30 files changed, 1712 insertions, 817 deletions
diff --git a/qemu/target-ppc/Makefile.objs b/qemu/target-ppc/Makefile.objs
index a7ae392cc..e667e6970 100644
--- a/qemu/target-ppc/Makefile.objs
+++ b/qemu/target-ppc/Makefile.objs
@@ -1,10 +1,10 @@
obj-y += cpu-models.o
obj-y += translate.o
ifeq ($(CONFIG_SOFTMMU),y)
-obj-y += machine.o mmu_helper.o mmu-hash32.o
+obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o
obj-$(TARGET_PPC64) += mmu-hash64.o arch_dump.o
endif
-obj-$(CONFIG_KVM) += kvm.o kvm_ppc.o
+obj-$(CONFIG_KVM) += kvm.o
obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
obj-y += dfp_helper.o
obj-y += excp_helper.o
diff --git a/qemu/target-ppc/arch_dump.c b/qemu/target-ppc/arch_dump.c
index 5acafc68a..df1fd8c33 100644
--- a/qemu/target-ppc/arch_dump.c
+++ b/qemu/target-ppc/arch_dump.c
@@ -12,6 +12,7 @@
*
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "elf.h"
#include "exec/cpu-all.h"
@@ -278,9 +279,3 @@ int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
PowerPCCPU *cpu = POWERPC_CPU(cs);
return ppc64_write_all_elf64_notes("CORE", f, cpu, cpuid, opaque);
}
-
-int ppc64_cpu_write_elf64_qemunote(WriteCoreDumpFunction f,
- CPUState *cpu, void *opaque)
-{
- return 0;
-}
diff --git a/qemu/target-ppc/cpu-models.c b/qemu/target-ppc/cpu-models.c
index 4d5ab4ba1..5209e63a7 100644
--- a/qemu/target-ppc/cpu-models.c
+++ b/qemu/target-ppc/cpu-models.c
@@ -24,6 +24,7 @@
* inside "#if defined(TODO) ... #endif" statements to make tests easier.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "cpu-models.h"
@@ -1138,10 +1139,12 @@
"POWER7 v2.3")
POWERPC_DEF("POWER7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
"POWER7+ v2.1")
- POWERPC_DEF("POWER8E_v1.0", CPU_POWERPC_POWER8E_v10, POWER8,
- "POWER8E v1.0")
- POWERPC_DEF("POWER8_v1.0", CPU_POWERPC_POWER8_v10, POWER8,
- "POWER8 v1.0")
+ POWERPC_DEF("POWER8E_v2.1", CPU_POWERPC_POWER8E_v21, POWER8,
+ "POWER8E v2.1")
+ POWERPC_DEF("POWER8_v2.0", CPU_POWERPC_POWER8_v20, POWER8,
+ "POWER8 v2.0")
+ POWERPC_DEF("POWER8NVL_v1.0",CPU_POWERPC_POWER8NVL_v10, POWER8,
+ "POWER8NVL v1.0")
POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
"PowerPC 970 v2.2")
POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
@@ -1389,8 +1392,9 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
{ "POWER5gs", "POWER5+_v2.1" },
{ "POWER7", "POWER7_v2.3" },
{ "POWER7+", "POWER7+_v2.1" },
- { "POWER8E", "POWER8E_v1.0" },
- { "POWER8", "POWER8_v1.0" },
+ { "POWER8E", "POWER8E_v2.1" },
+ { "POWER8", "POWER8_v2.0" },
+ { "POWER8NVL", "POWER8NVL_v1.0" },
{ "970", "970_v2.2" },
{ "970fx", "970fx_v3.1" },
{ "970mp", "970mp_v1.1" },
diff --git a/qemu/target-ppc/cpu-models.h b/qemu/target-ppc/cpu-models.h
index 9d80e7227..f21a44c83 100644
--- a/qemu/target-ppc/cpu-models.h
+++ b/qemu/target-ppc/cpu-models.h
@@ -557,9 +557,11 @@ enum {
CPU_POWERPC_POWER7P_BASE = 0x004A0000,
CPU_POWERPC_POWER7P_v21 = 0x004A0201,
CPU_POWERPC_POWER8E_BASE = 0x004B0000,
- CPU_POWERPC_POWER8E_v10 = 0x004B0100,
+ CPU_POWERPC_POWER8E_v21 = 0x004B0201,
CPU_POWERPC_POWER8_BASE = 0x004D0000,
- CPU_POWERPC_POWER8_v10 = 0x004D0100,
+ CPU_POWERPC_POWER8_v20 = 0x004D0200,
+ CPU_POWERPC_POWER8NVL_BASE = 0x004C0000,
+ CPU_POWERPC_POWER8NVL_v10 = 0x004C0100,
CPU_POWERPC_970_v22 = 0x00390202,
CPU_POWERPC_970FX_v10 = 0x00391100,
CPU_POWERPC_970FX_v20 = 0x003C0200,
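
The new *_BASE constants above follow the file's existing convention of keeping the low 16 bits (the revision field) clear. The following is a minimal standalone sketch, not QEMU code, of how a raw PVR value can be matched to a family that way; the 0xFFFF0000 mask and the helper name are illustrative assumptions:

    #include <stdint.h>
    #include <stdio.h>

    /* Values introduced by this patch */
    #define CPU_POWERPC_POWER8_BASE     0x004D0000
    #define CPU_POWERPC_POWER8_v20      0x004D0200
    #define CPU_POWERPC_POWER8NVL_BASE  0x004C0000
    #define CPU_POWERPC_POWER8NVL_v10   0x004C0100

    /* Mask off the revision field and compare against the family base */
    static int is_power8_family(uint32_t pvr)
    {
        return (pvr & 0xFFFF0000) == CPU_POWERPC_POWER8_BASE ||
               (pvr & 0xFFFF0000) == CPU_POWERPC_POWER8NVL_BASE;
    }

    int main(void)
    {
        printf("%d %d\n", is_power8_family(CPU_POWERPC_POWER8_v20),
                          is_power8_family(0x004A0201) /* POWER7+ v2.1 */);
        return 0;
    }
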
diff --git a/qemu/target-ppc/cpu-qom.h b/qemu/target-ppc/cpu-qom.h
index 6967a8028..7d5e2b36a 100644
--- a/qemu/target-ppc/cpu-qom.h
+++ b/qemu/target-ppc/cpu-qom.h
@@ -118,13 +118,13 @@ void ppc_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
int flags);
void ppc_cpu_dump_statistics(CPUState *cpu, FILE *f,
fprintf_function cpu_fprintf, int flags);
+int ppc_cpu_get_monitor_def(CPUState *cs, const char *name,
+ uint64_t *pval);
hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int ppc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_read_register_apple(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg);
-int ppc64_cpu_write_elf64_qemunote(WriteCoreDumpFunction f,
- CPUState *cpu, void *opaque);
int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
#ifndef CONFIG_USER_ONLY
diff --git a/qemu/target-ppc/cpu.h b/qemu/target-ppc/cpu.h
index 6f76674a4..5282533b3 100644
--- a/qemu/target-ppc/cpu.h
+++ b/qemu/target-ppc/cpu.h
@@ -19,7 +19,6 @@
#if !defined (__CPU_PPC_H__)
#define __CPU_PPC_H__
-#include "config.h"
#include "qemu-common.h"
//#define PPC_EMULATE_32BITS_HYPV
@@ -81,9 +80,9 @@
#include "fpu/softfloat.h"
#if defined (TARGET_PPC64)
-#define ELF_MACHINE EM_PPC64
+#define PPC_ELF_MACHINE EM_PPC64
#else
-#define ELF_MACHINE EM_PPC
+#define PPC_ELF_MACHINE EM_PPC
#endif
/*****************************************************************************/
@@ -117,14 +116,20 @@ enum powerpc_mmu_t {
#define POWERPC_MMU_AMR 0x00040000
/* 64 bits PowerPC MMU */
POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
+ /* Architecture 2.03 and later (has LPCR) */
+ POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
/* Architecture 2.06 variant */
POWERPC_MMU_2_06 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
| POWERPC_MMU_AMR | 0x00000003,
/* Architecture 2.06 "degraded" (no 1T segments) */
POWERPC_MMU_2_06a = POWERPC_MMU_64 | POWERPC_MMU_AMR
| 0x00000003,
- /* Architecture 2.06 "degraded" (no 1T segments or AMR) */
- POWERPC_MMU_2_06d = POWERPC_MMU_64 | 0x00000003,
+ /* Architecture 2.07 variant */
+ POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+ | POWERPC_MMU_AMR | 0x00000004,
+ /* Architecture 2.07 "degraded" (no 1T segments) */
+ POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR
+ | 0x00000004,
#endif /* defined(TARGET_PPC64) */
};
@@ -162,6 +167,8 @@ enum powerpc_excp_t {
POWERPC_EXCP_970,
/* POWER7 exception model */
POWERPC_EXCP_POWER7,
+ /* POWER8 exception model */
+ POWERPC_EXCP_POWER8,
#endif /* defined(TARGET_PPC64) */
};
@@ -413,6 +420,7 @@ typedef struct ppc_slb_t ppc_slb_t;
struct ppc_slb_t {
uint64_t esid;
uint64_t vsid;
+ const struct ppc_one_seg_page_size *sps;
};
#define MAX_SLB_ENTRIES 64
@@ -468,9 +476,17 @@ struct ppc_slb_t {
#define MSR_RI 1 /* Recoverable interrupt 1 */
#define MSR_LE 0 /* Little-endian mode 1 hflags */
-#define LPCR_ILE (1 << (63-38))
-#define LPCR_AIL_SHIFT (63-40) /* Alternate interrupt location */
-#define LPCR_AIL (3 << LPCR_AIL_SHIFT)
+/* LPCR bits */
+#define LPCR_VPM0 (1ull << (63 - 0))
+#define LPCR_VPM1 (1ull << (63 - 1))
+#define LPCR_ISL (1ull << (63 - 2))
+#define LPCR_KBV (1ull << (63 - 3))
+#define LPCR_ILE (1ull << (63 - 38))
+#define LPCR_MER (1ull << (63 - 52))
+#define LPCR_LPES0 (1ull << (63 - 60))
+#define LPCR_LPES1 (1ull << (63 - 61))
+#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
+#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
#define msr_sf ((env->msr >> MSR_SF) & 1)
#define msr_isf ((env->msr >> MSR_ISF) & 1)
@@ -678,6 +694,46 @@ enum {
#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
0x1F)
+#define FP_FX (1ull << FPSCR_FX)
+#define FP_FEX (1ull << FPSCR_FEX)
+#define FP_VX (1ull << FPSCR_VX)
+#define FP_OX (1ull << FPSCR_OX)
+#define FP_UX (1ull << FPSCR_UX)
+#define FP_ZX (1ull << FPSCR_ZX)
+#define FP_XX (1ull << FPSCR_XX)
+#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
+#define FP_VXISI (1ull << FPSCR_VXISI)
+#define FP_VXIDI (1ull << FPSCR_VXIDI)
+#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
+#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
+#define FP_VXVC (1ull << FPSCR_VXVC)
+#define FP_FR (1ull << FPSCR_FR)
+#define FP_FI (1ull << FPSCR_FI)
+#define FP_C (1ull << FPSCR_C)
+#define FP_FL (1ull << FPSCR_FL)
+#define FP_FG (1ull << FPSCR_FG)
+#define FP_FE (1ull << FPSCR_FE)
+#define FP_FU (1ull << FPSCR_FU)
+#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
+#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
+#define FP_VXCVI (1ull << FPSCR_VXCVI)
+#define FP_VE (1ull << FPSCR_VE)
+#define FP_OE (1ull << FPSCR_OE)
+#define FP_UE (1ull << FPSCR_UE)
+#define FP_ZE (1ull << FPSCR_ZE)
+#define FP_XE (1ull << FPSCR_XE)
+#define FP_NI (1ull << FPSCR_NI)
+#define FP_RN1 (1ull << FPSCR_RN1)
+#define FP_RN (1ull << FPSCR_RN)
+
+/* the exception bits which can be cleared by mcrfs - includes FX */
+#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
+ FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \
+ FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
+ FP_VXSQRT | FP_VXCVI)
+
/*****************************************************************************/
/* Vector status and control register */
#define VSCR_NJ 16 /* Vector non-java */
@@ -1073,6 +1129,7 @@ struct CPUPPCState {
uint64_t insns_flags2;
#if defined(TARGET_PPC64)
struct ppc_segment_page_sizes sps;
+ bool ci_large_pages;
#endif
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
@@ -1182,7 +1239,7 @@ void ppc_store_msr (CPUPPCState *env, target_ulong value);
void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf);
int ppc_get_compat_smt_threads(PowerPCCPU *cpu);
-int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
+void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp);
/* Time-base and decrementer management */
#ifndef NO_CPU_IO_DEFS
@@ -1213,6 +1270,7 @@ void store_booke_tcr (CPUPPCState *env, target_ulong val);
void store_booke_tsr (CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all (CPUPPCState *env);
void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
+void cpu_ppc_set_papr(PowerPCCPU *cpu);
#endif
#endif
@@ -1241,7 +1299,6 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
#define cpu_init(cpu_model) CPU(cpu_ppc_init(cpu_model))
#define cpu_exec cpu_ppc_exec
-#define cpu_gen_code cpu_ppc_gen_code
#define cpu_signal_handler cpu_ppc_signal_handler
#define cpu_list ppc_cpu_list
@@ -1250,7 +1307,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
#define MMU_MODE1_SUFFIX _kernel
#define MMU_MODE2_SUFFIX _hypv
#define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env)
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
{
return env->mmu_idx;
}
@@ -1300,11 +1357,14 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_SRR1 (0x01B)
#define SPR_CFAR (0x01C)
#define SPR_AMR (0x01D)
+#define SPR_ACOP (0x01F)
#define SPR_BOOKE_PID (0x030)
+#define SPR_BOOKS_PID (0x030)
#define SPR_BOOKE_DECAR (0x036)
#define SPR_BOOKE_CSRR0 (0x03A)
#define SPR_BOOKE_CSRR1 (0x03B)
#define SPR_BOOKE_DEAR (0x03D)
+#define SPR_IAMR (0x03D)
#define SPR_BOOKE_ESR (0x03E)
#define SPR_BOOKE_IVPR (0x03F)
#define SPR_MPC_EIE (0x050)
@@ -1334,6 +1394,12 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_UAMOR (0x09D)
#define SPR_MPC_ICTRL (0x09E)
#define SPR_MPC_BAR (0x09F)
+#define SPR_PSPB (0x09F)
+#define SPR_DAWR (0x0B4)
+#define SPR_RPR (0x0BA)
+#define SPR_CIABR (0x0BB)
+#define SPR_DAWRX (0x0BC)
+#define SPR_HFSCR (0x0BE)
#define SPR_VRSAVE (0x100)
#define SPR_USPRG0 (0x100)
#define SPR_USPRG1 (0x101)
@@ -1388,19 +1454,25 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_HSRR1 (0x13B)
#define SPR_BOOKE_IAC4 (0x13B)
#define SPR_BOOKE_DAC1 (0x13C)
-#define SPR_LPIDR (0x13D)
+#define SPR_MMCRH (0x13C)
#define SPR_DABR2 (0x13D)
#define SPR_BOOKE_DAC2 (0x13D)
+#define SPR_TFMR (0x13D)
#define SPR_BOOKE_DVC1 (0x13E)
#define SPR_LPCR (0x13E)
#define SPR_BOOKE_DVC2 (0x13F)
+#define SPR_LPIDR (0x13F)
#define SPR_BOOKE_TSR (0x150)
+#define SPR_HMER (0x150)
+#define SPR_HMEER (0x151)
#define SPR_PCR (0x152)
+#define SPR_BOOKE_LPIDR (0x152)
#define SPR_BOOKE_TCR (0x154)
#define SPR_BOOKE_TLB0PS (0x158)
#define SPR_BOOKE_TLB1PS (0x159)
#define SPR_BOOKE_TLB2PS (0x15A)
#define SPR_BOOKE_TLB3PS (0x15B)
+#define SPR_AMOR (0x15D)
#define SPR_BOOKE_MAS7_MAS3 (0x174)
#define SPR_BOOKE_IVOR0 (0x190)
#define SPR_BOOKE_IVOR1 (0x191)
@@ -1517,6 +1589,7 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_PERF0 (0x300)
#define SPR_RCPU_MI_RBA0 (0x300)
#define SPR_MPC_MI_CTR (0x300)
+#define SPR_POWER_USIER (0x300)
#define SPR_PERF1 (0x301)
#define SPR_RCPU_MI_RBA1 (0x301)
#define SPR_POWER_UMMCR2 (0x301)
@@ -1566,6 +1639,7 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_PERFF (0x30F)
#define SPR_MPC_MD_TW (0x30F)
#define SPR_UPERF0 (0x310)
+#define SPR_POWER_SIER (0x310)
#define SPR_UPERF1 (0x311)
#define SPR_POWER_MMCR2 (0x311)
#define SPR_UPERF2 (0x312)
@@ -1617,7 +1691,9 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_MPC_MD_DBRAM1 (0x32A)
#define SPR_RCPU_L2U_RA3 (0x32B)
#define SPR_TAR (0x32F)
+#define SPR_IC (0x350)
#define SPR_VTB (0x351)
+#define SPR_MMCRC (0x353)
#define SPR_440_INV0 (0x370)
#define SPR_440_INV1 (0x371)
#define SPR_440_INV2 (0x372)
@@ -1627,8 +1703,14 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_440_ITV2 (0x376)
#define SPR_440_ITV3 (0x377)
#define SPR_440_CCR1 (0x378)
+#define SPR_TACR (0x378)
+#define SPR_TCSCR (0x379)
+#define SPR_CSIGR (0x37a)
#define SPR_DCRIPR (0x37B)
+#define SPR_POWER_SPMC1 (0x37C)
+#define SPR_POWER_SPMC2 (0x37D)
#define SPR_POWER_MMCRS (0x37E)
+#define SPR_WORT (0x37F)
#define SPR_PPR (0x380)
#define SPR_750_GQR0 (0x390)
#define SPR_440_DNV0 (0x390)
@@ -1651,6 +1733,7 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define SPR_440_DVLIM (0x398)
#define SPR_750_WPAR (0x399)
#define SPR_440_IVLIM (0x399)
+#define SPR_TSCR (0x399)
#define SPR_750_DMAU (0x39A)
#define SPR_750_DMAL (0x39B)
#define SPR_440_RSTCFG (0x39B)
@@ -1825,9 +1908,10 @@ static inline int cpu_mmu_index (CPUPPCState *env)
#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
/* HID0 bits */
-#define HID0_DEEPNAP (1 << 24)
-#define HID0_DOZE (1 << 23)
-#define HID0_NAP (1 << 22)
+#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
+#define HID0_DOZE (1 << 23) /* pre-2.06 */
+#define HID0_NAP (1 << 22) /* pre-2.06 */
+#define HID0_HILE (1ull << (63 - 19)) /* POWER8 */
/*****************************************************************************/
/* PowerPC Instructions types definitions */
@@ -2176,6 +2260,33 @@ enum {
PCR_TM_DIS = 1ull << (63-2), /* Trans. memory disable (POWER8) */
};
+/* HMER/HMEER */
+enum {
+ HMER_MALFUNCTION_ALERT = 1ull << (63 - 0),
+ HMER_PROC_RECV_DONE = 1ull << (63 - 2),
+ HMER_PROC_RECV_ERROR_MASKED = 1ull << (63 - 3),
+ HMER_TFAC_ERROR = 1ull << (63 - 4),
+ HMER_TFMR_PARITY_ERROR = 1ull << (63 - 5),
+ HMER_XSCOM_FAIL = 1ull << (63 - 8),
+ HMER_XSCOM_DONE = 1ull << (63 - 9),
+ HMER_PROC_RECV_AGAIN = 1ull << (63 - 11),
+ HMER_WARN_RISE = 1ull << (63 - 14),
+ HMER_WARN_FALL = 1ull << (63 - 15),
+ HMER_SCOM_FIR_HMI = 1ull << (63 - 16),
+ HMER_TRIG_FIR_HMI = 1ull << (63 - 17),
+ HMER_HYP_RESOURCE_ERR = 1ull << (63 - 20),
+ HMER_XSCOM_STATUS_MASK = 7ull << (63 - 23),
+ HMER_XSCOM_STATUS_LSH = (63 - 23),
+};
+
+/* Alternate Interrupt Location (AIL) */
+enum {
+ AIL_NONE = 0,
+ AIL_RESERVED = 1,
+ AIL_0001_8000 = 2,
+ AIL_C000_0000_0000_4000 = 3,
+};
+
/*****************************************************************************/
static inline target_ulong cpu_read_xer(CPUPPCState *env)
@@ -2304,6 +2415,16 @@ static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
return msr & (1ULL << MSR_SF);
}
+/**
+ * Check whether register rx is in the range between start and
+ * start + nregs (as needed by the LSWX and LSWI instructions)
+ */
+static inline bool lsw_reg_in_range(int start, int nregs, int rx)
+{
+ return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
+ (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
+}
+
extern void (*cpu_ppc_hypercall)(PowerPCCPU *);
#include "exec/exec-all.h"
@@ -2328,4 +2449,5 @@ int ppc_get_vcpu_dt_id(PowerPCCPU *cpu);
*/
PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id);
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
#endif /* !defined (__CPU_PPC_H__) */
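
The lsw_reg_in_range() helper added to cpu.h above encodes the wrap-around rule for the lswi/lswx string loads: a transfer that starts near r31 continues at r0. Below is a small standalone sketch, not QEMU code, exercising both sides of that check with hypothetical operands:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same logic as the helper introduced above */
    static bool lsw_reg_in_range(int start, int nregs, int rx)
    {
        return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
               (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
    }

    int main(void)
    {
        /* 5 registers starting at r30 cover r30, r31, r0, r1, r2 */
        printf("%d\n", lsw_reg_in_range(30, 5, 1));  /* 1: r1 is inside the wrapped range */
        printf("%d\n", lsw_reg_in_range(30, 5, 3));  /* 0: r3 is not */
        return 0;
    }
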
diff --git a/qemu/target-ppc/dfp_helper.c b/qemu/target-ppc/dfp_helper.c
index 49820bf21..db0ede698 100644
--- a/qemu/target-ppc/dfp_helper.c
+++ b/qemu/target-ppc/dfp_helper.c
@@ -17,6 +17,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
@@ -170,27 +171,6 @@ static void dfp_prepare_decimal128(struct PPC_DFP *dfp, uint64_t *a,
}
}
-#define FP_FX (1ull << FPSCR_FX)
-#define FP_FEX (1ull << FPSCR_FEX)
-#define FP_OX (1ull << FPSCR_OX)
-#define FP_OE (1ull << FPSCR_OE)
-#define FP_UX (1ull << FPSCR_UX)
-#define FP_UE (1ull << FPSCR_UE)
-#define FP_XX (1ull << FPSCR_XX)
-#define FP_XE (1ull << FPSCR_XE)
-#define FP_ZX (1ull << FPSCR_ZX)
-#define FP_ZE (1ull << FPSCR_ZE)
-#define FP_VX (1ull << FPSCR_VX)
-#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
-#define FP_VXISI (1ull << FPSCR_VXISI)
-#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
-#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
-#define FP_VXIDI (1ull << FPSCR_VXIDI)
-#define FP_VXVC (1ull << FPSCR_VXVC)
-#define FP_VXCVI (1ull << FPSCR_VXCVI)
-#define FP_VE (1ull << FPSCR_VE)
-#define FP_FI (1ull << FPSCR_FI)
-
static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag,
uint64_t enabled)
{
diff --git a/qemu/target-ppc/excp_helper.c b/qemu/target-ppc/excp_helper.c
index b80347506..ca4ffe8ad 100644
--- a/qemu/target-ppc/excp_helper.c
+++ b/qemu/target-ppc/excp_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
@@ -23,6 +24,7 @@
#include "helper_regs.h"
//#define DEBUG_OP
+//#define DEBUG_SOFTWARE_TLB
//#define DEBUG_EXCEPTIONS
#ifdef DEBUG_EXCEPTIONS
@@ -75,7 +77,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
CPUPPCState *env = &cpu->env;
target_ulong msr, new_msr, vector;
int srr0, srr1, asrr0, asrr1;
- int lpes0, lpes1, lev;
+ int lpes0, lpes1, lev, ail;
if (0) {
/* XXX: find a suitable condition to enable the hypervisor mode */
@@ -106,6 +108,25 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
asrr0 = -1;
asrr1 = -1;
+ /* Exception targetting modifiers
+ *
+ * AIL is initialized here but can be cleared by
+ * selected exceptions
+ */
+#if defined(TARGET_PPC64)
+ if (excp_model == POWERPC_EXCP_POWER7 ||
+ excp_model == POWERPC_EXCP_POWER8) {
+ if (excp_model == POWERPC_EXCP_POWER8) {
+ ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
+ } else {
+ ail = 0;
+ }
+ } else
+#endif /* defined(TARGET_PPC64) */
+ {
+ ail = 0;
+ }
+
switch (excp) {
case POWERPC_EXCP_NONE:
/* Should never happen */
@@ -131,12 +152,11 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Machine check exception is not enabled.
* Enter checkstop state.
*/
- if (qemu_log_enabled()) {
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
qemu_log("Machine check while not allowed. "
"Entering checkstop state\n");
- } else {
- fprintf(stderr, "Machine check while not allowed. "
- "Entering checkstop state\n");
}
cs->halted = 1;
cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
@@ -145,6 +165,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* XXX: find a suitable condition to enable the hypervisor mode */
new_msr |= (target_ulong)MSR_HVB;
}
+ ail = 0;
/* machine check exceptions don't have ME set */
new_msr &= ~((target_ulong)1 << MSR_ME);
@@ -200,7 +221,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* Get rS/rD and rA from faulting opcode */
env->spr[SPR_DSISR] |= (cpu_ldl_code(env, (env->nip - 4))
& 0x03FF0000) >> 16;
- goto store_current;
+ goto store_next;
case POWERPC_EXCP_PROGRAM: /* Program exception */
switch (env->error_code & ~0xF) {
case POWERPC_EXCP_FP:
@@ -343,6 +364,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
/* XXX: find a suitable condition to enable the hypervisor mode */
new_msr |= (target_ulong)MSR_HVB;
}
+ ail = 0;
goto store_next;
case POWERPC_EXCP_DSEG: /* Data segment exception */
if (lpes1 == 0) {
@@ -629,7 +651,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
#ifdef TARGET_PPC64
- if (excp_model == POWERPC_EXCP_POWER7) {
+ if (excp_model == POWERPC_EXCP_POWER7 ||
+ excp_model == POWERPC_EXCP_POWER8) {
if (env->spr[SPR_LPCR] & LPCR_ILE) {
new_msr |= (target_ulong)1 << MSR_LE;
}
@@ -649,6 +672,29 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
excp);
}
vector |= env->excp_prefix;
+
+ /* AIL only works if there is no HV transition and we are running with
+ * translations enabled
+ */
+ if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1)) {
+ ail = 0;
+ }
+ /* Handle AIL */
+ if (ail) {
+ new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
+ switch(ail) {
+ case AIL_0001_8000:
+ vector |= 0x18000;
+ break;
+ case AIL_C000_0000_0000_4000:
+ vector |= 0xc000000000004000ull;
+ break;
+ default:
+ cpu_abort(cs, "Invalid AIL combination %d\n", ail);
+ break;
+ }
+ }
+
#if defined(TARGET_PPC64)
if (excp_model == POWERPC_EXCP_BOOKE) {
if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
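
The AIL handling added above relocates interrupt vectors when LPCR[AIL] selects an alternate location and forces MSR IR/DR on so the relocated vector runs with translation enabled. Here is a standalone sketch, not QEMU code, of just the vector relocation for a hypothetical LPCR value and the 0x500 external-interrupt offset (the real code also ORs in env->excp_prefix first):

    #include <stdint.h>
    #include <stdio.h>

    #define LPCR_AIL_SHIFT (63 - 40)
    #define LPCR_AIL       (3ull << LPCR_AIL_SHIFT)

    int main(void)
    {
        uint64_t lpcr = 3ull << LPCR_AIL_SHIFT;   /* AIL = 0b11 */
        uint64_t vector = 0x500;                  /* external interrupt offset */
        int ail = (lpcr & LPCR_AIL) >> LPCR_AIL_SHIFT;

        if (ail == 3) {                           /* AIL_C000_0000_0000_4000 */
            vector |= 0xc000000000004000ull;
        } else if (ail == 2) {                    /* AIL_0001_8000 */
            vector |= 0x18000;
        }
        printf("0x%llx\n", (unsigned long long)vector);  /* 0xc000000000004500 */
        return 0;
    }
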
diff --git a/qemu/target-ppc/fpu_helper.c b/qemu/target-ppc/fpu_helper.c
index 6cceffc55..b67ebca12 100644
--- a/qemu/target-ppc/fpu_helper.c
+++ b/qemu/target-ppc/fpu_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
@@ -194,7 +195,7 @@ static inline uint64_t fload_invalid_op_excp(CPUPPCState *env, int op,
/* Update the floating-point invalid operation summary */
env->fpscr |= 1 << FPSCR_VX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (ve != 0) {
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
@@ -211,7 +212,7 @@ static inline void float_zero_divide_excp(CPUPPCState *env)
env->fpscr |= 1 << FPSCR_ZX;
env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ze != 0) {
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
@@ -228,7 +229,7 @@ static inline void float_overflow_excp(CPUPPCState *env)
env->fpscr |= 1 << FPSCR_OX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_oe != 0) {
/* XXX: should adjust the result */
/* Update the floating-point enabled exception summary */
@@ -248,7 +249,7 @@ static inline void float_underflow_excp(CPUPPCState *env)
env->fpscr |= 1 << FPSCR_UX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ue != 0) {
/* XXX: should adjust the result */
/* Update the floating-point enabled exception summary */
@@ -265,7 +266,7 @@ static inline void float_inexact_excp(CPUPPCState *env)
env->fpscr |= 1 << FPSCR_XX;
/* Update the floating-point exception summary */
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_xe != 0) {
/* Update the floating-point enabled exception summary */
env->fpscr |= 1 << FPSCR_FEX;
@@ -330,31 +331,31 @@ void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
if (prev == 0) {
switch (bit) {
case FPSCR_VX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ve) {
goto raise_ve;
}
break;
case FPSCR_OX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_oe) {
goto raise_oe;
}
break;
case FPSCR_UX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ue) {
goto raise_ue;
}
break;
case FPSCR_ZX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ze) {
goto raise_ze;
}
break;
case FPSCR_XX:
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_xe) {
goto raise_xe;
}
@@ -369,7 +370,7 @@ void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
case FPSCR_VXSQRT:
case FPSCR_VXCVI:
env->fpscr |= 1 << FPSCR_VX;
- env->fpscr |= 1 << FPSCR_FX;
+ env->fpscr |= FP_FX;
if (fpscr_ve != 0) {
goto raise_ve;
}
diff --git a/qemu/target-ppc/gdbstub.c b/qemu/target-ppc/gdbstub.c
index 14675f456..569c380cf 100644
--- a/qemu/target-ppc/gdbstub.c
+++ b/qemu/target-ppc/gdbstub.c
@@ -17,7 +17,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#include "config.h"
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "exec/gdbstub.h"
@@ -88,7 +88,7 @@ static int ppc_gdb_register_len(int n)
the proper ordering for the binary, and cannot be changed.
For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check
the current mode of the chip to see if we're running in little-endian. */
-static void maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
{
#ifndef CONFIG_USER_ONLY
if (!msr_le) {
@@ -158,7 +158,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
break;
}
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
return r;
}
@@ -214,7 +214,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
break;
}
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
return r;
}
@@ -227,7 +227,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
if (!r) {
return r;
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
if (n < 32) {
/* gprs */
env->gpr[n] = ldtul_p(mem_buf);
@@ -277,7 +277,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
if (!r) {
return r;
}
- maybe_bswap_register(env, mem_buf, r);
+ ppc_maybe_bswap_register(env, mem_buf, r);
if (n < 32) {
/* gprs */
env->gpr[n] = ldq_p(mem_buf);
diff --git a/qemu/target-ppc/helper.h b/qemu/target-ppc/helper.h
index 869be1509..e5a8f7b9b 100644
--- a/qemu/target-ppc/helper.h
+++ b/qemu/target-ppc/helper.h
@@ -544,6 +544,7 @@ DEF_HELPER_2(74xx_tlbd, void, env, tl)
DEF_HELPER_2(74xx_tlbi, void, env, tl)
DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl)
#if defined(TARGET_PPC64)
DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl)
DEF_HELPER_2(load_slb_esid, tl, env, tl)
diff --git a/qemu/target-ppc/int_helper.c b/qemu/target-ppc/int_helper.c
index 0a55d5e54..27b0258d3 100644
--- a/qemu/target-ppc/int_helper.c
+++ b/qemu/target-ppc/int_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
@@ -2327,24 +2328,28 @@ void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
+ ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u32) {
- r->AVRW(i) = b->AVRW(i) ^
+ result.AVRW(i) = b->AVRW(i) ^
(AES_Te0[a->AVRB(AES_shifts[4*i + 0])] ^
AES_Te1[a->AVRB(AES_shifts[4*i + 1])] ^
AES_Te2[a->AVRB(AES_shifts[4*i + 2])] ^
AES_Te3[a->AVRB(AES_shifts[4*i + 3])]);
}
+ *r = result;
}
void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
+ ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u8) {
- r->AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
+ result.AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
}
+ *r = result;
}
void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
@@ -2369,11 +2374,13 @@ void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
+ ppc_avr_t result;
int i;
VECTOR_FOR_INORDER_I(i, u8) {
- r->AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
+ result.AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
}
+ *r = result;
}
#define ROTRu32(v, n) (((v) >> (n)) | ((v) << (32-n)))
@@ -2460,16 +2467,19 @@ void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
+ ppc_avr_t result;
int i;
+
VECTOR_FOR_INORDER_I(i, u8) {
int indexA = c->u8[i] >> 4;
int indexB = c->u8[i] & 0xF;
#if defined(HOST_WORDS_BIGENDIAN)
- r->u8[i] = a->u8[indexA] ^ b->u8[indexB];
+ result.u8[i] = a->u8[indexA] ^ b->u8[indexB];
#else
- r->u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
+ result.u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
#endif
}
+ *r = result;
}
#undef VECTOR_FOR_INORDER_I
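
The vcipher/vcipherlast/vncipherlast/vpermxor changes above compute into a local ppc_avr_t and assign it to *r only at the end, which matters when the destination register aliases a source. A standalone toy sketch, not QEMU code, showing the kind of corruption the temporary avoids when output and input are the same object:

    #include <stdio.h>

    typedef struct { unsigned char u8[4]; } vec4;

    /* buggy: writes through r while elements of a are still needed */
    static void xor_rev_inplace(vec4 *r, const vec4 *a)
    {
        for (int i = 0; i < 4; i++) {
            r->u8[i] = a->u8[3 - i] ^ 0xff;
        }
    }

    /* fixed: same shape as the patched helpers above */
    static void xor_rev_temp(vec4 *r, const vec4 *a)
    {
        vec4 result;
        for (int i = 0; i < 4; i++) {
            result.u8[i] = a->u8[3 - i] ^ 0xff;
        }
        *r = result;
    }

    int main(void)
    {
        vec4 v = { {1, 2, 3, 4} };
        xor_rev_inplace(&v, &v);   /* r == a: later reads see overwritten values */
        printf("%d %d %d %d\n", v.u8[0], v.u8[1], v.u8[2], v.u8[3]);
        vec4 w = { {1, 2, 3, 4} };
        xor_rev_temp(&w, &w);      /* correct regardless of aliasing */
        printf("%d %d %d %d\n", w.u8[0], w.u8[1], w.u8[2], w.u8[3]);
        return 0;
    }
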
diff --git a/qemu/target-ppc/kvm-stub.c b/qemu/target-ppc/kvm-stub.c
index ee3f5d2f7..627bcb432 100644
--- a/qemu/target-ppc/kvm-stub.c
+++ b/qemu/target-ppc/kvm-stub.c
@@ -9,6 +9,7 @@
* See the COPYING file in the top-level directory.
*
*/
+#include "qemu/osdep.h"
#include "qemu-common.h"
#include "hw/ppc/openpic.h"
diff --git a/qemu/target-ppc/kvm.c b/qemu/target-ppc/kvm.c
index 110436d08..c4c81467e 100644
--- a/qemu/target-ppc/kvm.c
+++ b/qemu/target-ppc/kvm.c
@@ -14,8 +14,8 @@
*
*/
+#include "qemu/osdep.h"
#include <dirent.h>
-#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/vfs.h>
@@ -23,6 +23,7 @@
#include <linux/kvm.h>
#include "qemu-common.h"
+#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
@@ -41,6 +42,7 @@
#include "exec/gdbstub.h"
#include "exec/memattrs.h"
#include "sysemu/hostmem.h"
+#include "qemu/cutils.h"
//#define DEBUG_KVM
@@ -259,7 +261,8 @@ static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
info->flags |= KVM_PPC_1T_SEGMENTS;
}
- if (env->mmu_model == POWERPC_MMU_2_06) {
+ if (env->mmu_model == POWERPC_MMU_2_06 ||
+ env->mmu_model == POWERPC_MMU_2_07) {
info->slb_size = 32;
} else {
info->slb_size = 64;
@@ -272,8 +275,9 @@ static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
info->sps[i].enc[0].pte_enc = 0;
i++;
- /* 64K on MMU 2.06 */
- if (env->mmu_model == POWERPC_MMU_2_06) {
+ /* 64K on MMU 2.06 and later */
+ if (env->mmu_model == POWERPC_MMU_2_06 ||
+ env->mmu_model == POWERPC_MMU_2_07) {
info->sps[i].page_shift = 16;
info->sps[i].slb_enc = 0x110;
info->sps[i].enc[0].page_shift = 16;
@@ -330,6 +334,12 @@ static long gethugepagesize(const char *mem_path)
return fs.f_bsize;
}
+/*
+ * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
+ * may or may not name the same files / on the same filesystem now as
+ * when we actually open and map them. Iterate over the file
+ * descriptors instead, and use qemu_fd_getpagesize().
+ */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
char *mem_path;
@@ -412,6 +422,13 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
/* Convert to QEMU form */
memset(&env->sps, 0, sizeof(env->sps));
+ /* If we have HV KVM, we need to forbid CI large pages if our
+ * host page size is smaller than 64K.
+ */
+ if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
+ env->ci_large_pages = getpagesize() >= 0x10000;
+ }
+
/*
* XXX This loop should be an entry wide AND of the capabilities that
* the selected CPU has with the capabilities that KVM supports.
@@ -503,6 +520,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
/* Synchronize sregs with kvm */
ret = kvm_arch_sync_sregs(cpu);
if (ret) {
+ if (ret == -EINVAL) {
+ error_report("Register sync failed... If you're using kvm-hv.ko,"
+ " only \"-cpu host\" is possible");
+ }
return ret;
}
@@ -641,8 +662,13 @@ static int kvm_put_fp(CPUState *cs)
for (i = 0; i < 32; i++) {
uint64_t vsr[2];
+#ifdef HOST_WORDS_BIGENDIAN
vsr[0] = float64_val(env->fpr[i]);
vsr[1] = env->vsr[i];
+#else
+ vsr[0] = env->vsr[i];
+ vsr[1] = float64_val(env->fpr[i]);
+#endif
reg.addr = (uintptr_t) &vsr;
reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
@@ -712,10 +738,17 @@ static int kvm_get_fp(CPUState *cs)
vsx ? "VSR" : "FPR", i, strerror(errno));
return ret;
} else {
+#ifdef HOST_WORDS_BIGENDIAN
env->fpr[i] = vsr[0];
if (vsx) {
env->vsr[i] = vsr[1];
}
+#else
+ env->fpr[i] = vsr[1];
+ if (vsx) {
+ env->vsr[i] = vsr[0];
+ }
+#endif
}
}
}
@@ -841,6 +874,44 @@ static int kvm_put_vpa(CPUState *cs)
}
#endif /* TARGET_PPC64 */
+int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int i;
+
+ sregs.pvr = env->spr[SPR_PVR];
+
+ sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+ if (env->slb[i].esid & SLB_ESID_V) {
+ sregs.u.s.ppc64.slb[i].slbe |= i;
+ }
+ sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ sregs.u.s.ppc32.sr[i] = env->sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ /* Beware. We have to swap upper and lower bits here */
+ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+ | env->DBAT[1][i];
+ sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+ | env->IBAT[1][i];
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -894,39 +965,8 @@ int kvm_arch_put_registers(CPUState *cs, int level)
}
if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
- struct kvm_sregs sregs;
-
- sregs.pvr = env->spr[SPR_PVR];
-
- sregs.u.s.sdr1 = env->spr[SPR_SDR1];
-
- /* Sync SLB */
-#ifdef TARGET_PPC64
- for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
- sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
- if (env->slb[i].esid & SLB_ESID_V) {
- sregs.u.s.ppc64.slb[i].slbe |= i;
- }
- sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
- }
-#endif
-
- /* Sync SRs */
- for (i = 0; i < 16; i++) {
- sregs.u.s.ppc32.sr[i] = env->sr[i];
- }
-
- /* Sync BATs */
- for (i = 0; i < 8; i++) {
- /* Beware. We have to swap upper and lower bits here */
- sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
- | env->DBAT[1][i];
- sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
- | env->IBAT[1][i];
- }
-
- ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
- if (ret) {
+ ret = kvmppc_put_books_sregs(cpu);
+ if (ret < 0) {
return ret;
}
}
@@ -988,12 +1028,197 @@ static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}
+static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_BASE) {
+ env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
+ env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
+ env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
+ env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
+ env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
+ env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
+ env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
+ env->spr[SPR_DECR] = sregs.u.e.dec;
+ env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
+ env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
+ env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
+ env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
+ env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
+ env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
+ env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
+ env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_64) {
+ env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
+ env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
+ env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+ kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
+ env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+ kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
+ env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+ kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
+ env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+ kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
+ env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+ kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
+ env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+ kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
+ env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+ kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
+ env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+ kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
+ env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+ kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
+ env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+ kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
+ env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+ kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
+ env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+ kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
+ env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+ kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
+ env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+ kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
+ env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+ kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
+ env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+ kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPE) {
+ env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+ kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
+ env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
+ env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PM) {
+ env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+ kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PC) {
+ env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
+ env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
+ }
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+ env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
+ env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
+ env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
+ env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
+ env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
+ env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
+ env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
+ env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
+ env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
+ env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_EXP) {
+ env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PD) {
+ env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
+ env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
+ }
+
+ if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+ env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
+ env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
+ env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
+
+ if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
+ env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
+ env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
+ }
+ }
+
+ return 0;
+}
+
+static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!env->external_htab) {
+ ppc_store_sdr1(env, sregs.u.s.sdr1);
+ }
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ /*
+ * The packed SLB array we get from KVM_GET_SREGS only contains
+ * information about valid entries. So we flush our internal copy
+ * to get rid of stale ones, then put all valid SLB entries back
+ * in.
+ */
+ memset(env->slb, 0, sizeof(env->slb));
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
+ target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
+ /*
+ * Only restore valid entries
+ */
+ if (rb & SLB_ESID_V) {
+ ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
+ }
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ env->sr[i] = sregs.u.s.ppc32.sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
+ env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
+ env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
+ env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
+ }
+
+ return 0;
+}
+
int kvm_arch_get_registers(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
- struct kvm_sregs sregs;
uint32_t cr;
int i, ret;
@@ -1033,174 +1258,17 @@ int kvm_arch_get_registers(CPUState *cs)
kvm_get_fp(cs);
if (cap_booke_sregs) {
- ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ ret = kvmppc_get_booke_sregs(cpu);
if (ret < 0) {
return ret;
}
-
- if (sregs.u.e.features & KVM_SREGS_E_BASE) {
- env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
- env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
- env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
- env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
- env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
- env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
- env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
- env->spr[SPR_DECR] = sregs.u.e.dec;
- env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
- env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
- env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
- env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
- env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
- env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
- env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
- env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_64) {
- env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
- env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
- env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
- kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
- env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
- kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
- env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
- kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
- env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
- kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
- env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
- kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
- env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
- kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
- env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
- kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
- env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
- kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
- env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
- kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
- env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
- kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
- env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
- kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
- env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
- kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
- env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
- kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
- env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
- kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
- env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
- kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
- env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
- kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
-
- if (sregs.u.e.features & KVM_SREGS_E_SPE) {
- env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
- kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
- env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
- kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
- env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
- kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_PM) {
- env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
- kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_PC) {
- env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
- kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
- env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
- kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
- }
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
- env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
- env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
- env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
- env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
- env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
- env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
- env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
- env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
- env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
- env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
- }
-
- if (sregs.u.e.features & KVM_SREGS_EXP) {
- env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_PD) {
- env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
- env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
- }
-
- if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
- env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
- env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
- env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
-
- if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
- env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
- env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
- }
- }
}
if (cap_segstate) {
- ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ ret = kvmppc_get_books_sregs(cpu);
if (ret < 0) {
return ret;
}
-
- if (!env->external_htab) {
- ppc_store_sdr1(env, sregs.u.s.sdr1);
- }
-
- /* Sync SLB */
-#ifdef TARGET_PPC64
- /*
- * The packed SLB array we get from KVM_GET_SREGS only contains
- * information about valid entries. So we flush our internal
- * copy to get rid of stale ones, then put all valid SLB entries
- * back in.
- */
- memset(env->slb, 0, sizeof(env->slb));
- for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
- target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
- target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
- /*
- * Only restore valid entries
- */
- if (rb & SLB_ESID_V) {
- ppc_store_slb(env, rb, rs);
- }
- }
-#endif
-
- /* Sync SRs */
- for (i = 0; i < 16; i++) {
- env->sr[i] = sregs.u.s.ppc32.sr[i];
- }
-
- /* Sync BATs */
- for (i = 0; i < 8; i++) {
- env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
- env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
- env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
- env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
- }
}
if (cap_hior) {
@@ -1310,7 +1378,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
/* Always wake up soon in case the interrupt was level based */
timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- (get_ticks_per_sec() / 50));
+ (NANOSECONDS_PER_SECOND / 50));
}
/* We don't know if there are more interrupts pending after this. However,
@@ -1770,7 +1838,7 @@ uint32_t kvmppc_get_tbfreq(void)
{
char line[512];
char *ns;
- uint32_t retval = get_ticks_per_sec();
+ uint32_t retval = NANOSECONDS_PER_SECOND;
if (read_cpuinfo("timebase", line, sizeof(line))) {
return retval;
@@ -1782,8 +1850,7 @@ uint32_t kvmppc_get_tbfreq(void)
ns++;
- retval = atoi(ns);
- return retval;
+ return atoi(ns);
}
bool kvmppc_get_host_serial(char **value)
@@ -1830,13 +1897,8 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len)
return 0;
}
-/* Read a CPU node property from the host device tree that's a single
- * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
- * (can't find or open the property, or doesn't understand the
- * format) */
-static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
+static uint64_t kvmppc_read_int_dt(const char *filename)
{
- char buf[PATH_MAX], *tmp;
union {
uint32_t v32;
uint64_t v64;
@@ -1844,14 +1906,7 @@ static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
FILE *f;
int len;
- if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
- return -1;
- }
-
- tmp = g_strdup_printf("%s/%s", buf, propname);
-
- f = fopen(tmp, "rb");
- g_free(tmp);
+ f = fopen(filename, "rb");
if (!f) {
return -1;
}
@@ -1869,6 +1924,26 @@ static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
return 0;
}
+/* Read a CPU node property from the host device tree that's a single
+ * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
+ * (can't find or open the property, or doesn't understand the
+ * format) */
+static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
+{
+ char buf[PATH_MAX], *tmp;
+ uint64_t val;
+
+ if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
+ return -1;
+ }
+
+ tmp = g_strdup_printf("%s/%s", buf, propname);
+ val = kvmppc_read_int_dt(tmp);
+ g_free(tmp);
+
+ return val;
+}
+
uint64_t kvmppc_get_clockfreq(void)
{
return kvmppc_read_int_cpu_dt("clock-frequency");
@@ -1933,7 +2008,7 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
hc[2] = cpu_to_be32(0x48000008);
hc[3] = cpu_to_be32(bswap32(0x3860ffff));
- return 0;
+ return 1;
}
static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
@@ -1953,6 +2028,11 @@ void kvmppc_enable_logical_ci_hcalls(void)
kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
}
+void kvmppc_enable_set_mode_hcall(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_SET_MODE);
+}
+
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
@@ -1960,7 +2040,8 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
if (ret) {
- cpu_abort(cs, "This KVM version does not support PAPR\n");
+ error_report("This vCPU type or KVM version does not support PAPR");
+ exit(1);
}
/* Update the capability flag so we sync the right information
@@ -1980,7 +2061,8 @@ void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
if (ret && mpic_proxy) {
- cpu_abort(cs, "This KVM version does not support EPR\n");
+ error_report("This KVM version does not support EPR");
+ exit(1);
}
}
@@ -2066,7 +2148,7 @@ bool kvmppc_spapr_use_multitce(void)
}
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
- bool vfio_accel)
+ bool need_vfio)
{
struct kvm_create_spapr_tce args = {
.liobn = liobn,
@@ -2080,7 +2162,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
* destroying the table, which the upper layers -will- do
*/
*pfd = -1;
- if (!cap_spapr_tce || (vfio_accel && !cap_spapr_vfio)) {
+ if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
return NULL;
}
@@ -2188,6 +2270,7 @@ static void kvmppc_host_cpu_initfn(Object *obj)
static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
{
+ DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
uint32_t vmx = kvmppc_get_vmx();
uint32_t dfp = kvmppc_get_dfp();
@@ -2214,6 +2297,9 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
if (icache_size != -1) {
pcc->l1_icache_size = icache_size;
}
+
+ /* Reason: kvmppc_host_cpu_initfn() dies when !kvm_enabled() */
+ dc->cannot_destroy_with_object_finalize_yet = true;
}
bool kvmppc_has_cap_epr(void)
@@ -2475,7 +2561,7 @@ error_out:
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
- uint64_t address, uint32_t data)
+ uint64_t address, uint32_t data, PCIDevice *dev)
{
return 0;
}
@@ -2484,3 +2570,12 @@ int kvm_arch_msi_data_to_gsi(uint32_t data)
{
return data & 0xffff;
}
+
+int kvmppc_enable_hwrng(void)
+{
+ if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
+ return -1;
+ }
+
+ return kvmppc_enable_hcall(kvm_state, H_RANDOM);
+}
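
Among the kvm.c changes above, kvm_put_fp()/kvm_get_fp() now order the two 64-bit halves of each VSX register by host endianness before exchanging the 128-bit buffer with KVM. A standalone sketch, not QEMU code, of that packing; HOST_WORDS_BIGENDIAN is assumed to be supplied by the build system, so a plain compile of this sketch takes the little-endian branch:

    #include <stdint.h>
    #include <stdio.h>

    static void pack_vsr(uint64_t fpr, uint64_t vsr_low, uint64_t out[2])
    {
    #ifdef HOST_WORDS_BIGENDIAN
        out[0] = fpr;      /* doubleword 0 (the FPR half) comes first on BE hosts */
        out[1] = vsr_low;
    #else
        out[0] = vsr_low;  /* ...and last on LE hosts */
        out[1] = fpr;
    #endif
    }

    int main(void)
    {
        uint64_t buf[2];
        pack_vsr(0x1111111111111111ull, 0x2222222222222222ull, buf);
        printf("%016llx %016llx\n",
               (unsigned long long)buf[0], (unsigned long long)buf[1]);
        return 0;
    }
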
diff --git a/qemu/target-ppc/kvm_ppc.c b/qemu/target-ppc/kvm_ppc.c
deleted file mode 100644
index f769acd44..000000000
--- a/qemu/target-ppc/kvm_ppc.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * PowerPC KVM support
- *
- * Copyright IBM Corp. 2008
- *
- * Authors:
- * Hollis Blanchard <hollisb@us.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu-common.h"
-#include "qemu/timer.h"
-#include "kvm_ppc.h"
-#include "sysemu/device_tree.h"
-#include "qemu/main-loop.h"
-
-#define PROC_DEVTREE_PATH "/proc/device-tree"
-
-static QEMUTimer *kvmppc_timer;
-static unsigned int kvmppc_timer_rate;
-
-static void kvmppc_timer_hack(void *opaque)
-{
- qemu_notify_event();
- timer_mod(kvmppc_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + kvmppc_timer_rate);
-}
-
-void kvmppc_init(void)
-{
- /* XXX The only reason KVM yields control back to qemu is device IO. Since
- * an idle guest does no IO, qemu's device model will never get a chance to
- * run. So, until QEMU gains IO threads, we create this timer to ensure
- * that the device model gets a chance to run. */
- kvmppc_timer_rate = get_ticks_per_sec() / 10;
- kvmppc_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &kvmppc_timer_hack, NULL);
- timer_mod(kvmppc_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + kvmppc_timer_rate);
-}
-
diff --git a/qemu/target-ppc/kvm_ppc.h b/qemu/target-ppc/kvm_ppc.h
index 4d30e2795..fc7931227 100644
--- a/qemu/target-ppc/kvm_ppc.h
+++ b/qemu/target-ppc/kvm_ppc.h
@@ -11,8 +11,6 @@
#define TYPE_HOST_POWERPC_CPU "host-" TYPE_POWERPC_CPU
-void kvmppc_init(void);
-
#ifdef CONFIG_KVM
uint32_t kvmppc_get_tbfreq(void);
@@ -25,6 +23,7 @@ int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
+void kvmppc_enable_set_mode_hcall(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
@@ -37,7 +36,7 @@ int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
off_t kvmppc_alloc_rma(void **rma);
bool kvmppc_spapr_use_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
- bool vfio_accel);
+ bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
@@ -55,6 +54,8 @@ void kvmppc_hash64_free_pteg(uint64_t token);
void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
target_ulong pte0, target_ulong pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
+int kvmppc_enable_hwrng(void);
+int kvmppc_put_books_sregs(PowerPCCPU *cpu);
#else
@@ -98,17 +99,16 @@ static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_l
return -1;
}
-static inline int kvmppc_read_segment_page_sizes(uint32_t *prop, int maxcells)
+static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
return -1;
}
-static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
+static inline void kvmppc_enable_logical_ci_hcalls(void)
{
- return -1;
}
-static inline void kvmppc_enable_logical_ci_hcalls(void)
+static inline void kvmppc_enable_set_mode_hcall(void)
{
}
@@ -176,7 +176,7 @@ static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
static inline int kvmppc_reset_htab(int shift_hint)
{
- return -1;
+ return 0;
}
static inline uint64_t kvmppc_rma_size(uint64_t current_size,
@@ -185,11 +185,6 @@ static inline uint64_t kvmppc_rma_size(uint64_t current_size,
return ram_size;
}
-static inline int kvmppc_update_sdr1(CPUPPCState *env)
-{
- return 0;
-}
-
#endif /* !CONFIG_USER_ONLY */
static inline bool kvmppc_has_cap_epr(void)
@@ -248,18 +243,59 @@ static inline bool kvmppc_has_cap_fixup_hcalls(void)
abort();
}
+static inline int kvmppc_enable_hwrng(void)
+{
+ return -1;
+}
+
+static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ abort();
+}
#endif
#ifndef CONFIG_KVM
+
#define kvmppc_eieio() do { } while (0)
-#else
+
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+#else /* CONFIG_KVM */
+
#define kvmppc_eieio() \
do { \
if (kvm_enabled()) { \
asm volatile("eieio" : : : "memory"); \
} \
} while (0)
-#endif
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+ asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+ asm volatile("icbi 0,%0" : : "r"(p));
+ }
+}
+
+#endif /* CONFIG_KVM */
#ifndef KVM_INTERRUPT_SET
#define KVM_INTERRUPT_SET -1
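
The new kvmppc_dcbst_range()/kvmppc_icbi_range() inlines walk a buffer one cache line at a time, using the dcache_line_size/icache_line_size values probed from the host. When guest code is patched in place, the conventional PowerPC sequence is: flush the data cache, order the stores, invalidate the instruction cache, then resync fetch. A sketch of how a caller might combine the two helpers (the wrapper name and the explicit barriers are assumptions, not part of this patch):

    /* Sketch only: make freshly written guest code visible to instruction fetch
     * on a PPC host with KVM enabled. */
    static void flush_patched_code(PowerPCCPU *cpu, uint8_t *buf, int len)
    {
        kvmppc_dcbst_range(cpu, buf, len);    /* write dirty data cache lines back  */
        asm volatile("sync" : : : "memory");  /* order dcbst before the icbi below  */
        kvmppc_icbi_range(cpu, buf, len);     /* drop stale instruction cache lines */
        asm volatile("isync");                /* resynchronize instruction fetch    */
    }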
diff --git a/qemu/target-ppc/machine.c b/qemu/target-ppc/machine.c
index f4ac7611d..46684fb93 100644
--- a/qemu/target-ppc/machine.c
+++ b/qemu/target-ppc/machine.c
@@ -1,7 +1,9 @@
+#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
+#include "mmu-hash64.h"
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
@@ -134,7 +136,7 @@ static void cpu_pre_save(void *opaque)
env->spr[SPR_LR] = env->lr;
env->spr[SPR_CTR] = env->ctr;
- env->spr[SPR_XER] = env->xer;
+ env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
env->spr[SPR_CFAR] = env->cfar;
#endif
@@ -168,7 +170,7 @@ static int cpu_post_load(void *opaque, int version_id)
env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
env->lr = env->spr[SPR_LR];
env->ctr = env->spr[SPR_CTR];
- env->xer = env->spr[SPR_XER];
+ cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
env->cfar = env->spr[SPR_CFAR];
#endif
@@ -352,11 +354,30 @@ static bool slb_needed(void *opaque)
return (cpu->env.mmu_model & POWERPC_MMU_64);
}
+static int slb_post_load(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ /* We've pulled in the raw esid and vsid values from the migration
+ * stream, but we need to recompute the page size pointers */
+ for (i = 0; i < env->slb_nr; i++) {
+ if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
+ /* Migration source had bad values in its SLB */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
static const VMStateDescription vmstate_slb = {
.name = "cpu/slb",
.version_id = 1,
.minimum_version_id = 1,
.needed = slb_needed,
+ .post_load = slb_post_load,
.fields = (VMStateField[]) {
VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
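
cpu_pre_save()/cpu_post_load() now round-trip XER through cpu_read_xer()/cpu_write_xer() because SO, OV and CA are kept in separate fields at runtime and only folded into the architected XER image for migration. The accessor pair lives in cpu.h and is not shown in this diff; roughly, it behaves like:

    /* Sketch of the cpu.h accessors this hunk relies on (not part of this diff). */
    static inline target_ulong cpu_read_xer(CPUPPCState *env)
    {
        return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) | (env->ca << XER_CA);
    }

    static inline void cpu_write_xer(CPUPPCState *env, target_ulong xer)
    {
        env->so = (xer >> XER_SO) & 1;
        env->ov = (xer >> XER_OV) & 1;
        env->ca = (xer >> XER_CA) & 1;
        env->xer = xer & ~((1ul << XER_SO) | (1ul << XER_OV) | (1ul << XER_CA));
    }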
diff --git a/qemu/target-ppc/mem_helper.c b/qemu/target-ppc/mem_helper.c
index 6d37dae7b..6d584c912 100644
--- a/qemu/target-ppc/mem_helper.c
+++ b/qemu/target-ppc/mem_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"
@@ -100,8 +101,10 @@ void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
uint32_t ra, uint32_t rb)
{
if (likely(xer_bc != 0)) {
- if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
- (reg < rb && (reg + xer_bc) > rb))) {
+ int num_used_regs = (xer_bc + 3) / 4;
+ if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
+ lsw_reg_in_range(reg, num_used_regs, rb))) {
+ env->nip += 4; /* Compensate the "nip - 4" from gen_lswx() */
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
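
The rewritten lswx check first works out how many GPRs the transfer really touches (four bytes per register, rounded up) and then asks whether ra or rb falls inside that register window, including the wrap past r31 back to r0. lsw_reg_in_range() itself is defined in cpu.h rather than in this hunk; its expected behaviour is roughly:

    /* Sketch of the range test assumed by the hunk above (the real helper is in cpu.h). */
    static inline bool lsw_reg_in_range(int start, int nregs, int rx)
    {
        return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
               (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
    }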
diff --git a/qemu/target-ppc/mfrom_table_gen.c b/qemu/target-ppc/mfrom_table_gen.c
index a140ded47..631791808 100644
--- a/qemu/target-ppc/mfrom_table_gen.c
+++ b/qemu/target-ppc/mfrom_table_gen.c
@@ -1,6 +1,5 @@
#define _GNU_SOURCE
-#include <stdint.h>
-#include <stdio.h>
+#include "qemu/osdep.h"
#include <math.h>
int main (void)
diff --git a/qemu/target-ppc/misc_helper.c b/qemu/target-ppc/misc_helper.c
index 6b12ca86a..73e3b0583 100644
--- a/qemu/target-ppc/misc_helper.c
+++ b/qemu/target-ppc/misc_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
diff --git a/qemu/target-ppc/mmu-hash32.c b/qemu/target-ppc/mmu-hash32.c
index dfee358d6..39abb2fd3 100644
--- a/qemu/target-ppc/mmu-hash32.c
+++ b/qemu/target-ppc/mmu-hash32.c
@@ -18,23 +18,18 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash32.h"
+#include "exec/log.h"
-//#define DEBUG_MMU
//#define DEBUG_BAT
-#ifdef DEBUG_MMU
-# define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
-#else
-# define LOG_MMU_STATE(cpu) do { } while (0)
-#endif
-
#ifdef DEBUG_BATS
-# define LOG_BATS(...) qemu_log(__VA_ARGS__)
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_BATS(...) do { } while (0)
#endif
@@ -90,9 +85,10 @@ static int ppc_hash32_pp_prot(int key, int pp, int nx)
return prot;
}
-static int ppc_hash32_pte_prot(CPUPPCState *env,
+static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
target_ulong sr, ppc_hash_pte32_t pte)
{
+ CPUPPCState *env = &cpu->env;
unsigned pp, key;
key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
@@ -101,9 +97,11 @@ static int ppc_hash32_pte_prot(CPUPPCState *env,
return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
}
-static target_ulong hash32_bat_size(CPUPPCState *env,
+static target_ulong hash32_bat_size(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
+ CPUPPCState *env = &cpu->env;
+
if ((msr_pr && !(batu & BATU32_VP))
|| (!msr_pr && !(batu & BATU32_VS))) {
return 0;
@@ -112,7 +110,7 @@ static target_ulong hash32_bat_size(CPUPPCState *env,
return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
}
-static int hash32_bat_prot(CPUPPCState *env,
+static int hash32_bat_prot(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
int pp, prot;
@@ -128,7 +126,7 @@ static int hash32_bat_prot(CPUPPCState *env,
return prot;
}
-static target_ulong hash32_bat_601_size(CPUPPCState *env,
+static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
if (!(batl & BATL32_601_V)) {
@@ -138,9 +136,10 @@ static target_ulong hash32_bat_601_size(CPUPPCState *env,
return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
}
-static int hash32_bat_601_prot(CPUPPCState *env,
+static int hash32_bat_601_prot(PowerPCCPU *cpu,
target_ulong batu, target_ulong batl)
{
+ CPUPPCState *env = &cpu->env;
int key, pp;
pp = batu & BATU32_601_PP;
@@ -152,9 +151,10 @@ static int hash32_bat_601_prot(CPUPPCState *env,
return ppc_hash32_pp_prot(key, pp, 0);
}
-static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
+static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
int *prot)
{
+ CPUPPCState *env = &cpu->env;
target_ulong *BATlt, *BATut;
int i;
@@ -173,9 +173,9 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
target_ulong mask;
if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
- mask = hash32_bat_601_size(env, batu, batl);
+ mask = hash32_bat_601_size(cpu, batu, batl);
} else {
- mask = hash32_bat_size(env, batu, batl);
+ mask = hash32_bat_size(cpu, batu, batl);
}
LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
" BATl " TARGET_FMT_lx "\n", __func__,
@@ -185,9 +185,9 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
hwaddr raddr = (batl & mask) | (ea & ~mask);
if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
- *prot = hash32_bat_601_prot(env, batu, batl);
+ *prot = hash32_bat_601_prot(cpu, batu, batl);
} else {
- *prot = hash32_bat_prot(env, batu, batl);
+ *prot = hash32_bat_prot(cpu, batu, batl);
}
return raddr & TARGET_PAGE_MASK;
@@ -216,11 +216,12 @@ static hwaddr ppc_hash32_bat_lookup(CPUPPCState *env, target_ulong ea, int rwx,
return -1;
}
-static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
+static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
target_ulong eaddr, int rwx,
hwaddr *raddr, int *prot)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
@@ -281,9 +282,8 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
}
return 1;
default:
- qemu_log("ERROR: instruction should not need "
+ cpu_abort(cs, "ERROR: instruction should not need "
"address translation\n");
- abort();
}
if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) {
*raddr = eaddr;
@@ -301,12 +301,14 @@ static int ppc_hash32_direct_store(CPUPPCState *env, target_ulong sr,
}
}
-hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash)
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
{
+ CPUPPCState *env = &cpu->env;
+
return (hash * HASH_PTEG_SIZE_32) & env->htab_mask;
}
-static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
+static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
bool secondary, target_ulong ptem,
ppc_hash_pte32_t *pte)
{
@@ -315,8 +317,8 @@ static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
int i;
for (i = 0; i < HPTES_PER_GROUP; i++) {
- pte0 = ppc_hash32_load_hpte0(env, pte_offset);
- pte1 = ppc_hash32_load_hpte1(env, pte_offset);
+ pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
+ pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
if ((pte0 & HPTE32_V_VALID)
&& (secondary == !!(pte0 & HPTE32_V_SECONDARY))
@@ -332,10 +334,11 @@ static hwaddr ppc_hash32_pteg_search(CPUPPCState *env, hwaddr pteg_off,
return -1;
}
-static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
+static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
target_ulong sr, target_ulong eaddr,
ppc_hash_pte32_t *pte)
{
+ CPUPPCState *env = &cpu->env;
hwaddr pteg_off, pte_offset;
hwaddr hash;
uint32_t vsid, pgidx, ptem;
@@ -356,16 +359,16 @@ static hwaddr ppc_hash32_htab_lookup(CPUPPCState *env,
" vsid=%" PRIx32 " ptem=%" PRIx32
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pteg_off = get_pteg_offset32(env, hash);
- pte_offset = ppc_hash32_pteg_search(env, pteg_off, 0, ptem, pte);
+ pteg_off = get_pteg_offset32(cpu, hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=%" PRIx32 " api=%" PRIx32
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pteg_off = get_pteg_offset32(env, ~hash);
- pte_offset = ppc_hash32_pteg_search(env, pteg_off, 1, ptem, pte);
+ pteg_off = get_pteg_offset32(cpu, ~hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
}
return pte_offset;
@@ -407,7 +410,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
/* 2. Check Block Address Translation entries (BATs) */
if (env->nb_BATs != 0) {
- raddr = ppc_hash32_bat_lookup(env, eaddr, rwx, &prot);
+ raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot);
if (raddr != -1) {
if (need_prot[rwx] & ~prot) {
if (rwx == 2) {
@@ -438,7 +441,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
/* 4. Handle direct store segments */
if (sr & SR32_T) {
- if (ppc_hash32_direct_store(env, sr, eaddr, rwx,
+ if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx,
&raddr, &prot) == 0) {
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
raddr & TARGET_PAGE_MASK, prot, mmu_idx,
@@ -457,7 +460,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
}
/* 6. Locate the PTE in the hash table */
- pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISI;
@@ -480,7 +483,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
/* 7. Check access permissions */
- prot = ppc_hash32_pte_prot(env, sr, pte);
+ prot = ppc_hash32_pte_prot(cpu, sr, pte);
if (need_prot[rwx] & ~prot) {
/* Access right violation */
@@ -515,7 +518,7 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
}
if (new_pte1 != pte.pte1) {
- ppc_hash32_store_hpte1(env, pte_offset, new_pte1);
+ ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
}
/* 9. Determine the real address from the PTE */
@@ -528,8 +531,9 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr, int rwx,
return 0;
}
-hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
+hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
+ CPUPPCState *env = &cpu->env;
target_ulong sr;
hwaddr pte_offset;
ppc_hash_pte32_t pte;
@@ -541,7 +545,7 @@ hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
}
if (env->nb_BATs != 0) {
- hwaddr raddr = ppc_hash32_bat_lookup(env, eaddr, 0, &prot);
+ hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
if (raddr != -1) {
return raddr;
}
@@ -554,7 +558,7 @@ hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong eaddr)
return -1;
}
- pte_offset = ppc_hash32_htab_lookup(env, sr, eaddr, &pte);
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
if (pte_offset == -1) {
return -1;
}
diff --git a/qemu/target-ppc/mmu-hash32.h b/qemu/target-ppc/mmu-hash32.h
index d515d4ff7..afbb9dd3d 100644
--- a/qemu/target-ppc/mmu-hash32.h
+++ b/qemu/target-ppc/mmu-hash32.h
@@ -3,8 +3,8 @@
#ifndef CONFIG_USER_ONLY
-hwaddr get_pteg_offset32(CPUPPCState *env, hwaddr hash);
-hwaddr ppc_hash32_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
+hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
int mmu_idx);
@@ -65,40 +65,42 @@ int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
#define HPTE32_R_WIMG 0x00000078
#define HPTE32_R_PP 0x00000003
-static inline target_ulong ppc_hash32_load_hpte0(CPUPPCState *env,
+static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu,
hwaddr pte_offset)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- return ldl_phys(cs->as, env->htab_base + pte_offset);
+ return ldl_phys(CPU(cpu)->as, env->htab_base + pte_offset);
}
-static inline target_ulong ppc_hash32_load_hpte1(CPUPPCState *env,
+static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu,
hwaddr pte_offset)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- return ldl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2);
+ return ldl_phys(CPU(cpu)->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2);
}
-static inline void ppc_hash32_store_hpte0(CPUPPCState *env,
+static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu,
hwaddr pte_offset, target_ulong pte0)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- stl_phys(cs->as, env->htab_base + pte_offset, pte0);
+ stl_phys(CPU(cpu)->as, env->htab_base + pte_offset, pte0);
}
-static inline void ppc_hash32_store_hpte1(CPUPPCState *env,
+static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
hwaddr pte_offset, target_ulong pte1)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
assert(!env->external_htab); /* Not supported on 32-bit for now */
- stl_phys(cs->as, env->htab_base + pte_offset + HASH_PTE_SIZE_32/2, pte1);
+ stl_phys(CPU(cpu)->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
}
typedef struct {
diff --git a/qemu/target-ppc/mmu-hash64.c b/qemu/target-ppc/mmu-hash64.c
index 7df6edebf..72c4ab5d7 100644
--- a/qemu/target-ppc/mmu-hash64.c
+++ b/qemu/target-ppc/mmu-hash64.c
@@ -17,38 +17,38 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "cpu.h"
#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
+#include "exec/log.h"
-//#define DEBUG_MMU
//#define DEBUG_SLB
-#ifdef DEBUG_MMU
-# define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
-#else
-# define LOG_MMU_STATE(cpu) do { } while (0)
-#endif
-
#ifdef DEBUG_SLB
-# define LOG_SLB(...) qemu_log(__VA_ARGS__)
+# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif
/*
- * Used to indicate whether we have allocated htab in the
- * host kernel
+ * Used to indicate that a CPU has its hash page table (HPT) managed
+ * within the host kernel
*/
-bool kvmppc_kern_htab;
+#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)
+
/*
* SLB handling
*/
-static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
+static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
+ CPUPPCState *env = &cpu->env;
uint64_t esid_256M, esid_1T;
int n;
@@ -76,12 +76,13 @@ static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
return NULL;
}
-void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
+void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
int i;
uint64_t slbe, slbv;
- cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));
+ cpu_synchronize_state(CPU(cpu));
cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
for (i = 0; i < env->slb_nr; i++) {
@@ -124,7 +125,7 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
PowerPCCPU *cpu = ppc_env_get_cpu(env);
ppc_slb_t *slb;
- slb = slb_lookup(env, addr);
+ slb = slb_lookup(cpu, addr);
if (!slb) {
return;
}
@@ -140,35 +141,62 @@ void helper_slbie(CPUPPCState *env, target_ulong addr)
}
}
-int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid)
{
- int slot = rb & 0xfff;
+ CPUPPCState *env = &cpu->env;
ppc_slb_t *slb = &env->slb[slot];
+ const struct ppc_one_seg_page_size *sps = NULL;
+ int i;
- if (rb & (0x1000 - env->slb_nr)) {
- return -1; /* Reserved bits set or slot too high */
+ if (slot >= env->slb_nr) {
+ return -1; /* Bad slot number */
+ }
+ if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+ return -1; /* Reserved bits set */
}
- if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+ if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
return -1; /* Bad segment size */
}
- if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+ if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
return -1; /* 1T segment on MMU that doesn't support it */
}
- /* Mask out the slot number as we store the entry */
- slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
- slb->vsid = rs;
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
+ " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
+ slot, esid, vsid);
+ return -1;
+ }
+
+ slb->esid = esid;
+ slb->vsid = vsid;
+ slb->sps = sps;
LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
- " %016" PRIx64 "\n", __func__, slot, rb, rs,
+ " %016" PRIx64 "\n", __func__, slot, esid, vsid,
slb->esid, slb->vsid);
return 0;
}
-static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
+static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
target_ulong *rt)
{
+ CPUPPCState *env = &cpu->env;
int slot = rb & 0xfff;
ppc_slb_t *slb = &env->slb[slot];
@@ -180,9 +208,10 @@ static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
return 0;
}
-static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
+static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
target_ulong *rt)
{
+ CPUPPCState *env = &cpu->env;
int slot = rb & 0xfff;
ppc_slb_t *slb = &env->slb[slot];
@@ -196,7 +225,9 @@ static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
- if (ppc_store_slb(env, rb, rs) < 0) {
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL);
}
@@ -204,9 +235,10 @@ void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
target_ulong rt = 0;
- if (ppc_load_slb_esid(env, rb, &rt) < 0) {
+ if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL);
}
@@ -215,9 +247,10 @@ target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
target_ulong rt = 0;
- if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
+ if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL);
}
@@ -227,10 +260,58 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
/*
* 64-bit hash table MMU handling
*/
+void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
+ Error **errp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong htabsize = value & SDR_64_HTABSIZE;
+
+ env->spr[SPR_SDR1] = value;
+ if (htabsize > 28) {
+ error_setg(errp,
+ "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
+ htabsize);
+ htabsize = 28;
+ }
+ env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
+ env->htab_base = value & SDR_64_HTABORG;
+}
+
+void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
+ Error **errp)
+{
+ CPUPPCState *env = &cpu->env;
+ Error *local_err = NULL;
-static int ppc_hash64_pte_prot(CPUPPCState *env,
+ cpu_synchronize_state(CPU(cpu));
+
+ if (hpt) {
+ env->external_htab = hpt;
+ } else {
+ env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
+ }
+ ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /* Not strictly necessary, but makes it clearer that an external
+ * htab is in use when debugging */
+ env->htab_base = -1;
+
+ if (kvm_enabled()) {
+ if (kvmppc_put_books_sregs(cpu) < 0) {
+ error_setg(errp, "Unable to update SDR1 in KVM");
+ }
+ }
+}
+
+static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
+ CPUPPCState *env = &cpu->env;
unsigned pp, key;
/* Some pp bit combinations have undefined behaviour, so default
* to no access in those cases */
@@ -280,12 +361,12 @@ static int ppc_hash64_pte_prot(CPUPPCState *env,
return prot;
}
-static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
+static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
+ CPUPPCState *env = &cpu->env;
int key, amrbits;
int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-
/* Only recent MMUs implement Virtual Page Class Key Protection */
if (!(env->mmu_model & POWERPC_MMU_AMR)) {
return prot;
@@ -321,25 +402,16 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
hwaddr pte_offset;
pte_offset = pte_index * HASH_PTE_SIZE_64;
- if (kvmppc_kern_htab) {
+ if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
/*
* HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
*/
token = kvmppc_hash64_read_pteg(cpu, pte_index);
- if (token) {
- return token;
- }
+ } else if (cpu->env.external_htab) {
/*
- * pteg read failed, even though we have allocated htab via
- * kvmppc_reset_htab.
+ * HTAB is controlled by QEMU. Just point to the internally
+ * accessible PTEG.
*/
- return 0;
- }
- /*
- * HTAB is controlled by QEMU. Just point to the internally
- * accessible PTEG.
- */
- if (cpu->env.external_htab) {
token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
} else if (cpu->env.htab_base) {
token = cpu->env.htab_base + pte_offset;
@@ -347,86 +419,73 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
return token;
}
-void ppc_hash64_stop_access(uint64_t token)
+void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
{
- if (kvmppc_kern_htab) {
+ if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
kvmppc_hash64_free_pteg(token);
}
}
-static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
+static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
bool secondary, target_ulong ptem,
ppc_hash_pte64_t *pte)
{
+ CPUPPCState *env = &cpu->env;
int i;
uint64_t token;
target_ulong pte0, pte1;
target_ulong pte_index;
pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
- token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
+ token = ppc_hash64_start_access(cpu, pte_index);
if (!token) {
return -1;
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
- pte0 = ppc_hash64_load_hpte0(env, token, i);
- pte1 = ppc_hash64_load_hpte1(env, token, i);
+ pte0 = ppc_hash64_load_hpte0(cpu, token, i);
+ pte1 = ppc_hash64_load_hpte1(cpu, token, i);
if ((pte0 & HPTE64_V_VALID)
&& (secondary == !!(pte0 & HPTE64_V_SECONDARY))
&& HPTE64_V_COMPARE(pte0, ptem)) {
pte->pte0 = pte0;
pte->pte1 = pte1;
- ppc_hash64_stop_access(token);
+ ppc_hash64_stop_access(cpu, token);
return (pte_index + i) * HASH_PTE_SIZE_64;
}
}
- ppc_hash64_stop_access(token);
+ ppc_hash64_stop_access(cpu, token);
/*
* We didn't find a valid entry.
*/
return -1;
}
-static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
-{
- uint64_t epnshift;
-
- /* Page size according to the SLB, which we use to generate the
- * EPN for hash table lookup.. When we implement more recent MMU
- * extensions this might be different from the actual page size
- * encoded in the PTE */
- if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
- epnshift = TARGET_PAGE_BITS;
- } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
- epnshift = TARGET_PAGE_BITS_64K;
- } else {
- epnshift = TARGET_PAGE_BITS_16M;
- }
- return epnshift;
-}
-
-static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
+static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
ppc_slb_t *slb, target_ulong eaddr,
ppc_hash_pte64_t *pte)
{
+ CPUPPCState *env = &cpu->env;
hwaddr pte_offset;
hwaddr hash;
- uint64_t vsid, epnshift, epnmask, epn, ptem;
+ uint64_t vsid, epnmask, epn, ptem;
+
+ /* The SLB store path should prevent any bad page size encodings
+ * getting in there, so: */
+ assert(slb->sps);
- epnshift = ppc_hash64_page_shift(slb);
- epnmask = ~((1ULL << epnshift) - 1);
+ epnmask = ~((1ULL << slb->sps->page_shift) - 1);
if (slb->vsid & SLB_VSID_B) {
/* 1TB segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
- hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
+ hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
} else {
/* 256M segment */
vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
- hash = vsid ^ (epn >> epnshift);
+ hash = vsid ^ (epn >> slb->sps->page_shift);
}
ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
@@ -442,7 +501,7 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ptem, hash);
- pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
if (pte_offset == -1) {
/* Secondary PTEG lookup */
@@ -452,24 +511,82 @@ static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ptem, ~hash);
- pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
+ pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
}
return pte_offset;
}
-static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
- target_ulong eaddr)
+static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
+ uint64_t pte0, uint64_t pte1)
+{
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ if (sps->page_shift != 12) {
+ /* 4kiB page in a non 4kiB segment */
+ return 0;
+ }
+ /* Normal 4kiB page */
+ return 12;
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_page_size *ps = &sps->enc[i];
+ uint64_t mask;
+
+ if (!ps->page_shift) {
+ break;
+ }
+
+ if (ps->page_shift == 12) {
+ /* L bit is set so this can't be a 4kiB page */
+ continue;
+ }
+
+ mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+ if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
+ return ps->page_shift;
+ }
+ }
+
+ return 0; /* Bad page size encoding */
+}
+
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1,
+ unsigned *seg_page_shift)
{
- hwaddr mask;
- int target_page_bits;
- hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ *seg_page_shift = 12;
+ return 12;
+ }
+
/*
- * We support 4K, 64K and 16M now
+ * The encodings in env->sps need to be carefully chosen so that
+ * this gives an unambiguous result.
*/
- target_page_bits = ppc_hash64_page_shift(slb);
- mask = (1ULL << target_page_bits) - 1;
- return (rpn & ~mask) | (eaddr & mask);
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+ unsigned shift;
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ shift = hpte_page_shift(sps, pte0, pte1);
+ if (shift) {
+ *seg_page_shift = sps->page_shift;
+ return shift;
+ }
+ }
+
+ *seg_page_shift = 0;
+ return 0;
}
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
@@ -478,6 +595,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
+ unsigned apshift;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
int pp_prot, amr_prot, prot;
@@ -499,7 +617,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
/* 2. Translation is on, so look up the SLB */
- slb = slb_lookup(env, eaddr);
+ slb = slb_lookup(cpu, eaddr);
if (!slb) {
if (rwx == 2) {
@@ -521,7 +639,7 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
/* 4. Locate the PTE in the hash table */
- pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
if (pte_offset == -1) {
if (rwx == 2) {
cs->exception_index = POWERPC_EXCP_ISI;
@@ -541,10 +659,22 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
qemu_log_mask(CPU_LOG_MMU,
"found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
+ /* Validate page size encoding */
+ apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+ if (!apshift) {
+ error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
+ " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
+        /* Not entirely sure what the right action is here, but a
+         * machine check seems reasonable */
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ env->error_code = 0;
+ return 1;
+ }
+
/* 5. Check access permissions */
- pp_prot = ppc_hash64_pte_prot(env, slb, pte);
- amr_prot = ppc_hash64_amr_prot(env, pte);
+ pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
+ amr_prot = ppc_hash64_amr_prot(cpu, pte);
prot = pp_prot & amr_prot;
if ((need_prot[rwx] & ~prot) != 0) {
@@ -587,51 +717,59 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
}
if (new_pte1 != pte.pte1) {
- ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
+ ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
pte.pte0, new_pte1);
}
/* 7. Determine the real address from the PTE */
- raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
+ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ prot, mmu_idx, 1ULL << apshift);
return 0;
}
-hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
+hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
+ CPUPPCState *env = &cpu->env;
ppc_slb_t *slb;
hwaddr pte_offset;
ppc_hash_pte64_t pte;
+ unsigned apshift;
if (msr_dr == 0) {
/* In real mode the top 4 effective address bits are ignored */
return addr & 0x0FFFFFFFFFFFFFFFULL;
}
- slb = slb_lookup(env, addr);
+ slb = slb_lookup(cpu, addr);
if (!slb) {
return -1;
}
- pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
if (pte_offset == -1) {
return -1;
}
- return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
+ apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
+ if (!apshift) {
+ return -1;
+ }
+
+ return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
+ & TARGET_PAGE_MASK;
}
-void ppc_hash64_store_hpte(CPUPPCState *env,
+void ppc_hash64_store_hpte(PowerPCCPU *cpu,
target_ulong pte_index,
target_ulong pte0, target_ulong pte1)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
- if (kvmppc_kern_htab) {
+ if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
return;
}
@@ -639,9 +777,22 @@ void ppc_hash64_store_hpte(CPUPPCState *env,
pte_index *= HASH_PTE_SIZE_64;
if (env->external_htab) {
stq_p(env->external_htab + pte_index, pte0);
- stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64/2, pte1);
+ stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
} else {
- stq_phys(cs->as, env->htab_base + pte_index, pte0);
- stq_phys(cs->as, env->htab_base + pte_index + HASH_PTE_SIZE_64/2, pte1);
+ stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
+ stq_phys(CPU(cpu)->as,
+ env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
}
}
+
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ /*
+ * XXX: given the fact that there are too many segments to
+ * invalidate, and we still don't have a tlb_flush_mask(env, n,
+ * mask) in QEMU, we just invalidate all TLBs
+ */
+ tlb_flush(CPU(cpu), 1);
+}
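
ppc_hash64_set_sdr1() turns the HTABSIZE field into a mask of PTEG indices: an HPT of 2^(18+HTABSIZE) bytes holds 2^(11+HTABSIZE) groups of 128 bytes, which is where the (1 << (htabsize + 18 - 7)) - 1 expression comes from. A standalone check of that arithmetic (plain C, independent of QEMU):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        for (unsigned htabsize = 0; htabsize <= 28; htabsize += 14) {
            uint64_t bytes = 1ULL << (htabsize + 18);            /* total HPT size     */
            uint64_t mask  = (1ULL << (htabsize + 18 - 7)) - 1;  /* PTEG index mask    */
            printf("HTABSIZE=%-2u  HPT=%llu bytes  htab_mask=0x%llx\n",
                   htabsize, (unsigned long long)bytes, (unsigned long long)mask);
        }
        return 0;   /* HTABSIZE=0 -> 256 KiB table, mask 0x7ff (2048 PTEGs) */
    }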
diff --git a/qemu/target-ppc/mmu-hash64.h b/qemu/target-ppc/mmu-hash64.h
index 291750f3e..9bf8b9b26 100644
--- a/qemu/target-ppc/mmu-hash64.h
+++ b/qemu/target-ppc/mmu-hash64.h
@@ -4,13 +4,21 @@
#ifndef CONFIG_USER_ONLY
#ifdef TARGET_PPC64
-void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);
-int ppc_store_slb (CPUPPCState *env, target_ulong rb, target_ulong rs);
-hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr);
+void ppc_hash64_check_page_sizes(PowerPCCPU *cpu, Error **errp);
+void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid);
+hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong address, int rw,
int mmu_idx);
-void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
+void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
target_ulong pte0, target_ulong pte1);
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1);
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1,
+ unsigned *seg_page_shift);
#endif
/*
@@ -40,6 +48,8 @@ void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
#define SLB_VSID_4K 0x0000000000000000ULL
#define SLB_VSID_64K 0x0000000000000110ULL
+#define SLB_VSID_16M 0x0000000000000100ULL
+#define SLB_VSID_16G 0x0000000000000120ULL
/*
* Hash page table definitions
@@ -80,36 +90,39 @@ void ppc_hash64_store_hpte(CPUPPCState *env, target_ulong index,
#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL
+void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
+ Error **errp);
+void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
+ Error **errp);
-extern bool kvmppc_kern_htab;
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
-void ppc_hash64_stop_access(uint64_t token);
+void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token);
-static inline target_ulong ppc_hash64_load_hpte0(CPUPPCState *env,
+static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
uint64_t token, int index)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
uint64_t addr;
addr = token + (index * HASH_PTE_SIZE_64);
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
} else {
- return ldq_phys(cs->as, addr);
+ return ldq_phys(CPU(cpu)->as, addr);
}
}
-static inline target_ulong ppc_hash64_load_hpte1(CPUPPCState *env,
+static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
uint64_t token, int index)
{
- CPUState *cs = CPU(ppc_env_get_cpu(env));
+ CPUPPCState *env = &cpu->env;
uint64_t addr;
addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
if (env->external_htab) {
return ldq_p((const void *)(uintptr_t)addr);
} else {
- return ldq_phys(cs->as, addr);
+ return ldq_phys(CPU(cpu)->as, addr);
}
}
diff --git a/qemu/target-ppc/mmu_helper.c b/qemu/target-ppc/mmu_helper.c
index 527c6adca..ff217941b 100644
--- a/qemu/target-ppc/mmu_helper.c
+++ b/qemu/target-ppc/mmu_helper.c
@@ -16,6 +16,8 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
+#include "qapi/error.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
@@ -23,28 +25,28 @@
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/cpu_ldst.h"
+#include "exec/log.h"
//#define DEBUG_MMU
//#define DEBUG_BATS
//#define DEBUG_SOFTWARE_TLB
//#define DUMP_PAGE_TABLES
-//#define DEBUG_SOFTWARE_TLB
//#define FLUSH_ALL_TLBS
#ifdef DEBUG_MMU
-# define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
+# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
#else
# define LOG_MMU_STATE(cpu) do { } while (0)
#endif
#ifdef DEBUG_SOFTWARE_TLB
-# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
+# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_SWTLB(...) do { } while (0)
#endif
#ifdef DEBUG_BATS
-# define LOG_BATS(...) qemu_log(__VA_ARGS__)
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_BATS(...) do { } while (0)
#endif
@@ -162,7 +164,7 @@ static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
if (ctx->raddr != (hwaddr)-1ULL) {
/* all matches should have equal RPN, WIMG & PP */
if ((ctx->raddr & mmask) != (pte1 & mmask)) {
- qemu_log("Bad RPN/WIMG/PP\n");
+ qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
return -3;
}
}
@@ -508,7 +510,7 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
/* Software TLB search */
ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
#if defined(DUMP_PAGE_TABLES)
- if (qemu_log_enabled()) {
+        if (qemu_loglevel_mask(CPU_LOG_MMU)) {
hwaddr curaddr;
uint32_t a0, a1, a2, a3;
@@ -575,8 +577,8 @@ static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
/* eciwx or ecowx */
return -4;
default:
- qemu_log("ERROR: instruction should not need "
- "address translation\n");
+ qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
+ "address translation\n");
return -4;
}
if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
@@ -658,32 +660,6 @@ static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
tlb_flush(CPU(cpu), 1);
}
-static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
- target_ulong eaddr, uint32_t pid)
-{
-#if !defined(FLUSH_ALL_TLBS)
- CPUState *cs = CPU(ppc_env_get_cpu(env));
- ppcemb_tlb_t *tlb;
- hwaddr raddr;
- target_ulong page, end;
- int i;
-
- for (i = 0; i < env->nb_tlb; i++) {
- tlb = &env->tlb.tlbe[i];
- if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
- end = tlb->EPN + tlb->size;
- for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
- tlb_flush_page(cs, page);
- }
- tlb->prot &= ~PAGE_VALID;
- break;
- }
- }
-#else
- ppc4xx_tlb_invalidate_all(env);
-#endif
-}
-
static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
target_ulong address, int rw,
int access_type)
@@ -1293,10 +1269,12 @@ void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
case POWERPC_MMU_2_06:
case POWERPC_MMU_2_06a:
- case POWERPC_MMU_2_06d:
- dump_slb(f, cpu_fprintf, env);
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+ dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env));
break;
#endif
default:
@@ -1433,15 +1411,17 @@ hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
switch (env->mmu_model) {
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
case POWERPC_MMU_2_06:
case POWERPC_MMU_2_06a:
- case POWERPC_MMU_2_06d:
- return ppc_hash64_get_phys_page_debug(env, addr);
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+ return ppc_hash64_get_phys_page_debug(cpu, addr);
#endif
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
- return ppc_hash32_get_phys_page_debug(env, addr);
+ return ppc_hash32_get_phys_page_debug(cpu, addr);
default:
;
@@ -1507,6 +1487,7 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
int rw, int mmu_idx)
{
CPUState *cs = CPU(ppc_env_get_cpu(env));
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
mmu_ctx_t ctx;
int access_type;
int ret = 0;
@@ -1608,9 +1589,9 @@ static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
tlb_miss:
env->error_code |= ctx.key << 19;
env->spr[SPR_HASH1] = env->htab_base +
- get_pteg_offset32(env, ctx.hash[0]);
+ get_pteg_offset32(cpu, ctx.hash[0]);
env->spr[SPR_HASH2] = env->htab_base +
- get_pteg_offset32(env, ctx.hash[1]);
+ get_pteg_offset32(cpu, ctx.hash[1]);
break;
case POWERPC_MMU_SOFT_74xx:
if (rw == 1) {
@@ -1937,9 +1918,11 @@ void ppc_tlb_invalidate_all(CPUPPCState *env)
case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
case POWERPC_MMU_2_06:
case POWERPC_MMU_2_06a:
- case POWERPC_MMU_2_06d:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
#endif /* defined(TARGET_PPC64) */
tlb_flush(CPU(cpu), 1);
break;
@@ -1965,25 +1948,6 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
ppc6xx_tlb_invalidate_virt(env, addr, 1);
}
break;
- case POWERPC_MMU_SOFT_4xx:
- case POWERPC_MMU_SOFT_4xx_Z:
- ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
- break;
- case POWERPC_MMU_REAL:
- cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
- break;
- case POWERPC_MMU_MPC8xx:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
- break;
- case POWERPC_MMU_BOOKE:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
- break;
- case POWERPC_MMU_BOOKE206:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "BookE 2.06 MMU model is not implemented\n");
- break;
case POWERPC_MMU_32B:
case POWERPC_MMU_601:
/* tlbie invalidate TLBs for all segments */
@@ -2011,9 +1975,11 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
break;
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
case POWERPC_MMU_2_06:
case POWERPC_MMU_2_06a:
- case POWERPC_MMU_2_06d:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
/* tlbie invalidate TLBs for all segments */
/* XXX: given the fact that there are too many segments to invalidate,
* and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
@@ -2023,9 +1989,8 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
break;
#endif /* defined(TARGET_PPC64) */
default:
- /* XXX: TODO */
- cpu_abort(CPU(cpu), "Unknown MMU model\n");
- break;
+ /* Should never reach here with other MMU models */
+ assert(0);
}
#else
ppc_tlb_invalidate_all(env);
@@ -2041,15 +2006,14 @@ void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
- target_ulong htabsize = value & SDR_64_HTABSIZE;
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ Error *local_err = NULL;
- if (htabsize > 28) {
- fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
- " stored in SDR1\n", htabsize);
- htabsize = 28;
+ ppc_hash64_set_sdr1(cpu, value, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ error_free(local_err);
}
- env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
- env->htab_base = value & SDR_64_HTABORG;
} else
#endif /* defined(TARGET_PPC64) */
{
@@ -2080,21 +2044,17 @@ void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
(int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
- uint64_t rb = 0, rs = 0;
+ uint64_t esid, vsid;
/* ESID = srnum */
- rb |= ((uint32_t)srnum & 0xf) << 28;
- /* Set the valid bit */
- rb |= SLB_ESID_V;
- /* Index = ESID */
- rb |= (uint32_t)srnum;
+ esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
/* VSID = VSID */
- rs |= (value & 0xfffffff) << 12;
+ vsid = (value & 0xfffffff) << 12;
/* flags = flags */
- rs |= ((value >> 27) & 0xf) << 8;
+ vsid |= ((value >> 27) & 0xf) << 8;
- ppc_store_slb(env, rb, rs);
+ ppc_store_slb(cpu, srnum, esid, vsid);
} else
#endif
if (env->sr[srnum] != value) {
@@ -2128,6 +2088,16 @@ void helper_tlbie(CPUPPCState *env, target_ulong addr)
ppc_tlb_invalidate_one(env, addr);
}
+void helper_tlbiva(CPUPPCState *env, target_ulong addr)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ /* tlbiva instruction only exists on BookE */
+ assert(env->mmu_model == POWERPC_MMU_BOOKE);
+ /* XXX: TODO */
+ cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
+}
+
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
diff --git a/qemu/target-ppc/monitor.c b/qemu/target-ppc/monitor.c
new file mode 100644
index 000000000..c2d0806dd
--- /dev/null
+++ b/qemu/target-ppc/monitor.c
@@ -0,0 +1,147 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ unsigned int u;
+ int i;
+
+ u = 0;
+ for (i = 0; i < 8; i++)
+ u |= env->crf[i] << (32 - (4 * (i + 1)));
+
+ return u;
+}
+
+static target_long monitor_get_decr (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return cpu_ppc_load_decr(env);
+}
+
+static target_long monitor_get_tbu (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return cpu_ppc_load_tbu(env);
+}
+
+static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return cpu_ppc_load_tbl(env);
+}
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env();
+
+ dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
+}
+
+const MonitorDef monitor_defs[] = {
+ { "fpscr", offsetof(CPUPPCState, fpscr) },
+ /* Next instruction pointer */
+ { "nip|pc", offsetof(CPUPPCState, nip) },
+ { "lr", offsetof(CPUPPCState, lr) },
+ { "ctr", offsetof(CPUPPCState, ctr) },
+ { "decr", 0, &monitor_get_decr, },
+ { "ccr|cr", 0, &monitor_get_ccr, },
+ /* Machine state register */
+ { "xer", offsetof(CPUPPCState, xer) },
+ { "msr", offsetof(CPUPPCState, msr) },
+ { "tbu", 0, &monitor_get_tbu, },
+ { "tbl", 0, &monitor_get_tbl, },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
+
+static int ppc_cpu_get_reg_num(const char *numstr, int maxnum, int *pregnum)
+{
+ int regnum;
+ char *endptr = NULL;
+
+ if (!*numstr) {
+ return false;
+ }
+
+ regnum = strtoul(numstr, &endptr, 10);
+ if (*endptr || (regnum >= maxnum)) {
+ return false;
+ }
+ *pregnum = regnum;
+
+ return true;
+}
+
+int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
+{
+ int i, regnum;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ /* General purpose registers */
+ if ((tolower(name[0]) == 'r') &&
+ ppc_cpu_get_reg_num(name + 1, ARRAY_SIZE(env->gpr), &regnum)) {
+ *pval = env->gpr[regnum];
+ return 0;
+ }
+
+ /* Floating point registers */
+ if ((tolower(name[0]) == 'f') &&
+ ppc_cpu_get_reg_num(name + 1, ARRAY_SIZE(env->fpr), &regnum)) {
+ *pval = env->fpr[regnum];
+ return 0;
+ }
+
+ /* Special purpose registers */
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); ++i) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (spr->name && (strcasecmp(name, spr->name) == 0)) {
+ *pval = env->spr[i];
+ return 0;
+ }
+ }
+
+ /* Segment registers */
+#if !defined(CONFIG_USER_ONLY)
+ if ((strncasecmp(name, "sr", 2) == 0) &&
+ ppc_cpu_get_reg_num(name + 2, ARRAY_SIZE(env->sr), &regnum)) {
+ *pval = env->sr[regnum];
+ return 0;
+ }
+#endif
+
+ return -EINVAL;
+}
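
monitor_get_ccr() rebuilds the architected CR image from the eight 4-bit crf fields that QEMU keeps separately: crf[0] ends up in bits 31..28, crf[1] in 27..24, and so on down to crf[7] in bits 3..0. A small illustrative check of that packing (not part of the new file):

    /* Illustration only: crf = {0x8, 0x2, 0, 0, 0, 0, 0, 0x1} packs to 0x82000001,
     * i.e. CR0.LT, CR1.EQ and CR7.SO set. */
    static uint32_t pack_cr(const uint32_t crf[8])
    {
        uint32_t cr = 0;
        int i;

        for (i = 0; i < 8; i++) {
            cr |= crf[i] << (32 - 4 * (i + 1));
        }
        return cr;
    }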
diff --git a/qemu/target-ppc/timebase_helper.c b/qemu/target-ppc/timebase_helper.c
index 865dcbed2..3b340d70d 100644
--- a/qemu/target-ppc/timebase_helper.c
+++ b/qemu/target-ppc/timebase_helper.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
@@ -130,13 +131,14 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
uint32_t val = 0;
if (unlikely(env->dcr_env == NULL)) {
- qemu_log("No DCR environment\n");
+ qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_INVAL);
} else if (unlikely(ppc_dcr_read(env->dcr_env,
(uint32_t)dcrn, &val) != 0)) {
- qemu_log("DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
+ qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
+ (uint32_t)dcrn, (uint32_t)dcrn);
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
}
@@ -146,13 +148,14 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
{
if (unlikely(env->dcr_env == NULL)) {
- qemu_log("No DCR environment\n");
+ qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_INVAL);
} else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn,
(uint32_t)val) != 0)) {
- qemu_log("DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn);
+ qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
+ (uint32_t)dcrn, (uint32_t)dcrn);
helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
}
diff --git a/qemu/target-ppc/translate.c b/qemu/target-ppc/translate.c
index 84c5cead6..b3860ecde 100644
--- a/qemu/target-ppc/translate.c
+++ b/qemu/target-ppc/translate.c
@@ -18,6 +18,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
@@ -28,6 +29,7 @@
#include "exec/helper-gen.h"
#include "trace-tcg.h"
+#include "exec/log.h"
#define CPU_SINGLE_STEP 0x1
@@ -47,7 +49,7 @@
/* Code translation helpers */
/* global register indexes */
-static TCGv_ptr cpu_env;
+static TCGv_env cpu_env;
static char cpu_reg_names[10*3 + 22*4 /* GPR */
+ 10*4 + 22*5 /* SPE GPRh */
+ 10*4 + 22*5 /* FPR */
@@ -91,7 +93,7 @@ void ppc_translate_init(void)
for (i = 0; i < 8; i++) {
snprintf(p, cpu_reg_names_size, "crf%d", i);
- cpu_crf[i] = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUPPCState, crf[i]), p);
p += 5;
cpu_reg_names_size -= 5;
@@ -99,28 +101,28 @@ void ppc_translate_init(void)
for (i = 0; i < 32; i++) {
snprintf(p, cpu_reg_names_size, "r%d", i);
- cpu_gpr[i] = tcg_global_mem_new(TCG_AREG0,
+ cpu_gpr[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, gpr[i]), p);
p += (i < 10) ? 3 : 4;
cpu_reg_names_size -= (i < 10) ? 3 : 4;
snprintf(p, cpu_reg_names_size, "r%dH", i);
- cpu_gprh[i] = tcg_global_mem_new(TCG_AREG0,
+ cpu_gprh[i] = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, gprh[i]), p);
p += (i < 10) ? 4 : 5;
cpu_reg_names_size -= (i < 10) ? 4 : 5;
snprintf(p, cpu_reg_names_size, "fp%d", i);
- cpu_fpr[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUPPCState, fpr[i]), p);
p += (i < 10) ? 4 : 5;
cpu_reg_names_size -= (i < 10) ? 4 : 5;
snprintf(p, cpu_reg_names_size, "avr%dH", i);
#ifdef HOST_WORDS_BIGENDIAN
- cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUPPCState, avr[i].u64[0]), p);
#else
- cpu_avrh[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUPPCState, avr[i].u64[1]), p);
#endif
p += (i < 10) ? 6 : 7;
@@ -128,55 +130,55 @@ void ppc_translate_init(void)
snprintf(p, cpu_reg_names_size, "avr%dL", i);
#ifdef HOST_WORDS_BIGENDIAN
- cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUPPCState, avr[i].u64[1]), p);
#else
- cpu_avrl[i] = tcg_global_mem_new_i64(TCG_AREG0,
+ cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUPPCState, avr[i].u64[0]), p);
#endif
p += (i < 10) ? 6 : 7;
cpu_reg_names_size -= (i < 10) ? 6 : 7;
snprintf(p, cpu_reg_names_size, "vsr%d", i);
- cpu_vsr[i] = tcg_global_mem_new_i64(TCG_AREG0,
- offsetof(CPUPPCState, vsr[i]), p);
+ cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, vsr[i]), p);
p += (i < 10) ? 5 : 6;
cpu_reg_names_size -= (i < 10) ? 5 : 6;
}
- cpu_nip = tcg_global_mem_new(TCG_AREG0,
+ cpu_nip = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, nip), "nip");
- cpu_msr = tcg_global_mem_new(TCG_AREG0,
+ cpu_msr = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, msr), "msr");
- cpu_ctr = tcg_global_mem_new(TCG_AREG0,
+ cpu_ctr = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, ctr), "ctr");
- cpu_lr = tcg_global_mem_new(TCG_AREG0,
+ cpu_lr = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, lr), "lr");
#if defined(TARGET_PPC64)
- cpu_cfar = tcg_global_mem_new(TCG_AREG0,
+ cpu_cfar = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, cfar), "cfar");
#endif
- cpu_xer = tcg_global_mem_new(TCG_AREG0,
+ cpu_xer = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, xer), "xer");
- cpu_so = tcg_global_mem_new(TCG_AREG0,
+ cpu_so = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, so), "SO");
- cpu_ov = tcg_global_mem_new(TCG_AREG0,
+ cpu_ov = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, ov), "OV");
- cpu_ca = tcg_global_mem_new(TCG_AREG0,
+ cpu_ca = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, ca), "CA");
- cpu_reserve = tcg_global_mem_new(TCG_AREG0,
+ cpu_reserve = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, reserve_addr),
"reserve_addr");
- cpu_fpscr = tcg_global_mem_new(TCG_AREG0,
+ cpu_fpscr = tcg_global_mem_new(cpu_env,
offsetof(CPUPPCState, fpscr), "fpscr");
- cpu_access_type = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_access_type = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUPPCState, access_type), "access_type");
done_init = 1;
@@ -2500,18 +2502,31 @@ static void gen_fmrgow(DisasContext *ctx)
static void gen_mcrfs(DisasContext *ctx)
{
TCGv tmp = tcg_temp_new();
+ TCGv_i32 tmask;
+ TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
int bfa;
+ int nibble;
+ int shift;
if (unlikely(!ctx->fpu_enabled)) {
gen_exception(ctx, POWERPC_EXCP_FPU);
return;
}
- bfa = 4 * (7 - crfS(ctx->opcode));
- tcg_gen_shri_tl(tmp, cpu_fpscr, bfa);
+ bfa = crfS(ctx->opcode);
+ nibble = 7 - bfa;
+ shift = 4 * nibble;
+ tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
- tcg_temp_free(tmp);
tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
- tcg_gen_andi_tl(cpu_fpscr, cpu_fpscr, ~(0xF << bfa));
+ tcg_temp_free(tmp);
+ tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
+ /* Only the exception bits (including FX) should be cleared if read */
+ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+ /* FEX and VX need to be updated, so don't set fpscr directly */
+ tmask = tcg_const_i32(1 << nibble);
+ gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+ tcg_temp_free_i32(tmask);
+ tcg_temp_free_i64(tnew_fpscr);
}
/* mffs */
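The gen_mcrfs() rework above stops clearing the whole FPSCR nibble that was read: only the sticky exception bits covered by FP_EX_CLEAR_BITS are dropped, and the result is written back through the store_fpscr helper so FEX and VX get recomputed. A minimal sketch of the intended new-FPSCR value (plain C mirroring the generated TCG; mcrfs_next_fpscr is a made-up name):

    static uint64_t mcrfs_next_fpscr(uint64_t fpscr, int bfa)
    {
        int nibble = 7 - bfa;   /* CR field bfa maps onto this FPSCR nibble */
        int shift = 4 * nibble;

        /* clear only the exception bits inside the nibble that was read */
        return fpscr & ~(((uint64_t)0xF << shift) & FP_EX_CLEAR_BITS);
    }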
@@ -3212,10 +3227,8 @@ static void gen_lswi(DisasContext *ctx)
if (nb == 0)
nb = 32;
- nr = nb / 4;
- if (unlikely(((start + nr) > 32 &&
- start <= ra && (start + nr - 32) > ra) ||
- ((start + nr) <= 32 && start <= ra && (start + nr) > ra))) {
+ nr = (nb + 3) / 4;
+ if (unlikely(lsw_reg_in_range(start, nr, ra))) {
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
return;
}
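The lswi fix above rounds the byte count up to a register count (nr = (nb + 3) / 4) and moves the wrap-around overlap test into lsw_reg_in_range(), which is not shown in this hunk. A sketch consistent with the inline check it replaces, assuming this is what the helper does:

    static bool lsw_reg_in_range(int start, int nregs, int rx)
    {
        /* the destination registers wrap from r31 back to r0 */
        return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
               (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
    }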
@@ -4267,14 +4280,17 @@ static inline void gen_op_mfspr(DisasContext *ctx)
void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
uint32_t sprn = SPR(ctx->opcode);
-#if !defined(CONFIG_USER_ONLY)
- if (ctx->hv)
+#if defined(CONFIG_USER_ONLY)
+ read_cb = ctx->spr_cb[sprn].uea_read;
+#else
+ if (ctx->pr) {
+ read_cb = ctx->spr_cb[sprn].uea_read;
+ } else if (ctx->hv) {
read_cb = ctx->spr_cb[sprn].hea_read;
- else if (!ctx->pr)
+ } else {
read_cb = ctx->spr_cb[sprn].oea_read;
- else
+ }
#endif
- read_cb = ctx->spr_cb[sprn].uea_read;
if (likely(read_cb != NULL)) {
if (likely(read_cb != SPR_NOACCESS)) {
(*read_cb)(ctx, rD(ctx->opcode), sprn);
@@ -4285,19 +4301,23 @@ static inline void gen_op_mfspr(DisasContext *ctx)
* allowing userland application to read the PVR
*/
if (sprn != SPR_PVR) {
- qemu_log("Trying to read privileged spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
- printf("Trying to read privileged spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ fprintf(stderr, "Trying to read privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to read privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
}
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
} else {
/* Not defined */
- qemu_log("Trying to read invalid spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
- printf("Trying to read invalid spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ fprintf(stderr, "Trying to read invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to read invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
}
}
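The mfspr change above (and the matching mtspr change below) reorders the privilege checks so that problem state always gets the user-mode callback, even when MSR[HV] is set. A hypothetical helper summarising the new selection (pick_spr_read_cb is illustrative, not part of the patch):

    typedef void (*spr_read_fn)(DisasContext *ctx, int gprn, int sprn);

    static spr_read_fn pick_spr_read_cb(DisasContext *ctx, const ppc_spr_t *spr)
    {
        if (ctx->pr) {
            return spr->uea_read;   /* problem state: user view, HV or not */
        } else if (ctx->hv) {
            return spr->hea_read;   /* hypervisor state */
        }
        return spr->oea_read;       /* plain supervisor state */
    }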
@@ -4418,31 +4438,38 @@ static void gen_mtspr(DisasContext *ctx)
void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
uint32_t sprn = SPR(ctx->opcode);
-#if !defined(CONFIG_USER_ONLY)
- if (ctx->hv)
+#if defined(CONFIG_USER_ONLY)
+ write_cb = ctx->spr_cb[sprn].uea_write;
+#else
+ if (ctx->pr) {
+ write_cb = ctx->spr_cb[sprn].uea_write;
+ } else if (ctx->hv) {
write_cb = ctx->spr_cb[sprn].hea_write;
- else if (!ctx->pr)
+ } else {
write_cb = ctx->spr_cb[sprn].oea_write;
- else
+ }
#endif
- write_cb = ctx->spr_cb[sprn].uea_write;
if (likely(write_cb != NULL)) {
if (likely(write_cb != SPR_NOACCESS)) {
(*write_cb)(ctx, sprn, rS(ctx->opcode));
} else {
/* Privilege exception */
- qemu_log("Trying to write privileged spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
- printf("Trying to write privileged spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ fprintf(stderr, "Trying to write privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to write privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
}
} else {
/* Not defined */
- qemu_log("Trying to write invalid spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
- printf("Trying to write invalid spr %d (0x%03x) at "
- TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to write invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
+ fprintf(stderr, "Trying to write invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
gen_inval_exception(ctx, POWERPC_EXCP_INVAL_SPR);
}
}
@@ -5896,7 +5923,7 @@ static void gen_tlbiva(DisasContext *ctx)
}
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
- gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
tcg_temp_free(t0);
#endif
}
@@ -9884,7 +9911,7 @@ GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
#endif
GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001FF801, PPC_MISC),
-GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000001, PPC_MISC),
+GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
@@ -10670,6 +10697,13 @@ GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+#undef GEN_XX2IFORM
+#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2)
+
#undef GEN_XX3_RC_FORM
#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \
GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \
@@ -10731,8 +10765,8 @@ GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
-GEN_XX2FORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
-GEN_XX2FORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
+GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
+GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
@@ -11320,9 +11354,11 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
case POWERPC_MMU_SOFT_74xx:
#if defined(TARGET_PPC64)
case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
case POWERPC_MMU_2_06:
case POWERPC_MMU_2_06a:
- case POWERPC_MMU_2_06d:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
#endif
cpu_fprintf(f, " SDR1 " TARGET_FMT_lx " DAR " TARGET_FMT_lx
" DSISR " TARGET_FMT_lx "\n", env->spr[SPR_SDR1],
@@ -11402,17 +11438,13 @@ void ppc_cpu_dump_statistics(CPUState *cs, FILE*f,
}
/*****************************************************************************/
-static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
- TranslationBlock *tb,
- bool search_pc)
+void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
- CPUPPCState *env = &cpu->env;
DisasContext ctx, *ctxp = &ctx;
opc_handler_t **table, *handler;
target_ulong pc_start;
- CPUBreakpoint *bp;
- int j, lj = -1;
int num_insns;
int max_insns;
@@ -11469,36 +11501,34 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
#endif
num_insns = 0;
max_insns = tb->cflags & CF_COUNT_MASK;
- if (max_insns == 0)
+ if (max_insns == 0) {
max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
gen_tb_start(tb);
tcg_clear_temp_count();
/* Set env in case of segfault during code fetch */
while (ctx.exception == POWERPC_EXCP_NONE && !tcg_op_buf_full()) {
- if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
- QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
- if (bp->pc == ctx.nip) {
- gen_debug_exception(ctxp);
- break;
- }
- }
- }
- if (unlikely(search_pc)) {
- j = tcg_op_buf_count();
- if (lj < j) {
- lj++;
- while (lj < j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- }
- tcg_ctx.gen_opc_pc[lj] = ctx.nip;
- tcg_ctx.gen_opc_instr_start[lj] = 1;
- tcg_ctx.gen_opc_icount[lj] = num_insns;
+ tcg_gen_insn_start(ctx.nip);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.nip, BP_ANY))) {
+ gen_debug_exception(ctxp);
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.nip += 4;
+ break;
}
+
LOG_DISAS("----------------\n");
LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
ctx.nip, ctx.mem_idx, (int)msr_ir);
- if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
if (unlikely(need_byteswap(&ctx))) {
ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
@@ -11508,12 +11538,8 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
LOG_DISAS("translate opcode %08x (%02x %02x %02x) (%s)\n",
ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
opc3(ctx.opcode), ctx.le_mode ? "little" : "big");
- if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
- tcg_gen_debug_insn_start(ctx.nip);
- }
ctx.nip += 4;
table = env->opcodes;
- num_insns++;
handler = table[opc1(ctx.opcode)];
if (is_indirect_opcode(handler)) {
table = ind_table(handler);
@@ -11525,12 +11551,10 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
}
/* Is opcode *REALLY* valid ? */
if (unlikely(handler->handler == &gen_invalid)) {
- if (qemu_log_enabled()) {
- qemu_log("invalid/unsupported opcode: "
- "%02x - %02x - %02x (%08x) " TARGET_FMT_lx " %d\n",
- opc1(ctx.opcode), opc2(ctx.opcode),
- opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, (int)msr_ir);
- }
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
+ "%02x - %02x - %02x (%08x) " TARGET_FMT_lx " %d\n",
+ opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), ctx.opcode, ctx.nip - 4, (int)msr_ir);
} else {
uint32_t inval;
@@ -11541,13 +11565,11 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
}
if (unlikely((ctx.opcode & inval) != 0)) {
- if (qemu_log_enabled()) {
- qemu_log("invalid bits: %08x for opcode: "
- "%02x - %02x - %02x (%08x) " TARGET_FMT_lx "\n",
- ctx.opcode & inval, opc1(ctx.opcode),
- opc2(ctx.opcode), opc3(ctx.opcode),
- ctx.opcode, ctx.nip - 4);
- }
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
+ "%02x - %02x - %02x (%08x) " TARGET_FMT_lx "\n",
+ ctx.opcode & inval, opc1(ctx.opcode),
+ opc2(ctx.opcode), opc3(ctx.opcode),
+ ctx.opcode, ctx.nip - 4);
gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL);
break;
}
@@ -11592,15 +11614,9 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
}
gen_tb_end(tb, num_insns);
- if (unlikely(search_pc)) {
- j = tcg_op_buf_count();
- lj++;
- while (lj <= j)
- tcg_ctx.gen_opc_instr_start[lj++] = 0;
- } else {
- tb->size = ctx.nip - pc_start;
- tb->icount = num_insns;
- }
+ tb->size = ctx.nip - pc_start;
+ tb->icount = num_insns;
+
#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
int flags;
@@ -11613,17 +11629,8 @@ static inline void gen_intermediate_code_internal(PowerPCCPU *cpu,
#endif
}
-void gen_intermediate_code (CPUPPCState *env, struct TranslationBlock *tb)
-{
- gen_intermediate_code_internal(ppc_env_get_cpu(env), tb, false);
-}
-
-void gen_intermediate_code_pc (CPUPPCState *env, struct TranslationBlock *tb)
-{
- gen_intermediate_code_internal(ppc_env_get_cpu(env), tb, true);
-}
-
-void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
+ target_ulong *data)
{
- env->nip = tcg_ctx.gen_opc_pc[pc_pos];
+ env->nip = data[0];
}
diff --git a/qemu/target-ppc/translate_init.c b/qemu/target-ppc/translate_init.c
index 16d7b16ac..f51572552 100644
--- a/qemu/target-ppc/translate_init.c
+++ b/qemu/target-ppc/translate_init.c
@@ -18,6 +18,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "disas/bfd.h"
#include "exec/gdbstub.h"
#include <sysemu/kvm.h>
@@ -305,7 +306,7 @@ static void spr_read_ibat (DisasContext *ctx, int gprn, int sprn)
static void spr_read_ibat_h (DisasContext *ctx, int gprn, int sprn)
{
- tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT4U) / 2]));
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
}
static void spr_write_ibatu (DisasContext *ctx, int sprn, int gprn)
@@ -578,17 +579,33 @@ static inline void vscr_init (CPUPPCState *env, uint32_t val)
#define spr_register_kvm(env, num, name, uea_read, uea_write, \
oea_read, oea_write, one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, initial_value)
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, initial_value)
#else
#if !defined(CONFIG_KVM)
#define spr_register_kvm(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, one_reg_id, initial_value) \
+ oea_read, oea_write, one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, oea_read, oea_write, initial_value)
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, initial_value)
+ oea_read, oea_write, hea_read, hea_write, initial_value)
#else
#define spr_register_kvm(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, one_reg_id, initial_value) \
+ oea_read, oea_write, one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, oea_read, oea_write, \
+ one_reg_id, initial_value)
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
_spr_register(env, num, name, uea_read, uea_write, \
- oea_read, oea_write, one_reg_id, initial_value)
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value)
#endif
#endif
@@ -597,6 +614,13 @@ static inline void vscr_init (CPUPPCState *env, uint32_t val)
spr_register_kvm(env, num, name, uea_read, uea_write, \
oea_read, oea_write, 0, initial_value)
+#define spr_register_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ initial_value) \
+ spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ 0, initial_value)
+
static inline void _spr_register(CPUPPCState *env, int num,
const char *name,
void (*uea_read)(DisasContext *ctx, int gprn, int sprn),
@@ -605,6 +629,8 @@ static inline void _spr_register(CPUPPCState *env, int num,
void (*oea_read)(DisasContext *ctx, int gprn, int sprn),
void (*oea_write)(DisasContext *ctx, int sprn, int gprn),
+ void (*hea_read)(DisasContext *opaque, int gprn, int sprn),
+ void (*hea_write)(DisasContext *opaque, int sprn, int gprn),
#endif
#if defined(CONFIG_KVM)
uint64_t one_reg_id,
@@ -632,6 +658,8 @@ static inline void _spr_register(CPUPPCState *env, int num,
#if !defined(CONFIG_USER_ONLY)
spr->oea_read = oea_read;
spr->oea_write = oea_write;
+ spr->hea_read = hea_read;
+ spr->hea_write = hea_write;
#endif
#if defined(CONFIG_KVM)
spr->one_reg_id = one_reg_id,
@@ -1035,30 +1063,102 @@ static void gen_spr_7xx (CPUPPCState *env)
#ifdef TARGET_PPC64
#ifndef CONFIG_USER_ONLY
-static void spr_read_uamr (DisasContext *ctx, int gprn, int sprn)
+static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
{
- gen_load_spr(cpu_gpr[gprn], SPR_AMR);
- spr_load_dump_spr(SPR_AMR);
-}
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
-static void spr_write_uamr (DisasContext *ctx, int sprn, int gprn)
-{
- gen_store_spr(SPR_AMR, cpu_gpr[gprn]);
+ /* Note, the HV=1 PR=0 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ if (ctx->pr) {
+ gen_load_spr(t1, SPR_UAMOR);
+ } else {
+ gen_load_spr(t1, SPR_AMOR);
+ }
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_AMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+    /* OR in the new bits and write the result out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_AMR, t0);
spr_store_dump_spr(SPR_AMR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
}
-static void spr_write_uamr_pr (DisasContext *ctx, int sprn, int gprn)
+static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
{
TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /* Note, the HV=1 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ gen_load_spr(t1, SPR_AMOR);
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
gen_load_spr(t0, SPR_UAMOR);
- tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
- gen_store_spr(SPR_AMR, t0);
- spr_store_dump_spr(SPR_AMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+    /* OR in the new bits and write the result out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_UAMOR, t0);
+ spr_store_dump_spr(SPR_UAMOR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /* Note, the HV=1 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ gen_load_spr(t1, SPR_AMOR);
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_IAMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+    /* OR in the new bits and write the result out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_IAMR, t0);
+ spr_store_dump_spr(SPR_IAMR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
}
#endif /* CONFIG_USER_ONLY */
-static void gen_spr_amr (CPUPPCState *env)
+static void gen_spr_amr(CPUPPCState *env, bool has_iamr)
{
#ifndef CONFIG_USER_ONLY
/* Virtual Page Class Key protection */
@@ -1066,17 +1166,31 @@ static void gen_spr_amr (CPUPPCState *env)
* userspace accessible, 29 is privileged. So we only need to set
* the kvm ONE_REG id on one of them, we use 29 */
spr_register(env, SPR_UAMR, "UAMR",
- &spr_read_uamr, &spr_write_uamr_pr,
- &spr_read_uamr, &spr_write_uamr,
+ &spr_read_generic, &spr_write_amr,
+ &spr_read_generic, &spr_write_amr,
0);
- spr_register_kvm(env, SPR_AMR, "AMR",
+ spr_register_kvm_hv(env, SPR_AMR, "AMR",
SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_amr,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_AMR, 0);
- spr_register_kvm(env, SPR_UAMOR, "UAMOR",
+ spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR",
SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_uamor,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_UAMOR, 0);
+ spr_register_hv(env, SPR_AMOR, "AMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+ if (has_iamr) {
+ spr_register_kvm_hv(env, SPR_IAMR, "IAMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_iamr,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_IAMR, 0);
+ }
#endif /* !CONFIG_USER_ONLY */
}
#endif /* TARGET_PPC64 */
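The new spr_write_amr()/spr_write_uamor()/spr_write_iamr() helpers above all implement the same pattern: bits may only change where the relevant authority mask (UAMOR when writing from problem state, AMOR otherwise) permits it. Stripped of the TCG plumbing, the computation amounts to this illustrative C (masked_spr_write is a made-up name, not generated code):

    static uint64_t masked_spr_write(uint64_t old_val, uint64_t new_val,
                                     uint64_t authority_mask)
    {
        /* keep old bits outside the mask, take new bits inside it */
        return (old_val & ~authority_mask) | (new_val & authority_mask);
    }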
@@ -7463,6 +7577,25 @@ static void gen_spr_book3s_dbg(CPUPPCState *env)
KVM_REG_PPC_DABRX, 0x00000000);
}
+static void gen_spr_book3s_207_dbg(CPUPPCState *env)
+{
+ spr_register_kvm_hv(env, SPR_DAWR, "DAWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAWR, 0x00000000);
+ spr_register_kvm_hv(env, SPR_DAWRX, "DAWRX",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAWRX, 0x00000000);
+ spr_register_kvm_hv(env, SPR_CIABR, "CIABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_CIABR, 0x00000000);
+}
+
static void gen_spr_970_dbg(CPUPPCState *env)
{
/* Breakpoints */
@@ -7602,6 +7735,30 @@ static void gen_spr_power8_pmu_sup(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
KVM_REG_PPC_MMCRS, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIER, "SIER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIER, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SPMC1, "SPMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SPMC1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SPMC2, "SPMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SPMC2, 0x00000000);
+ spr_register_kvm(env, SPR_TACR, "TACR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TACR, 0x00000000);
+ spr_register_kvm(env, SPR_TCSCR, "TCSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TCSCR, 0x00000000);
+ spr_register_kvm(env, SPR_CSIGR, "CSIGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_CSIGR, 0x00000000);
}
static void gen_spr_power8_pmu_user(CPUPPCState *env)
@@ -7610,6 +7767,10 @@ static void gen_spr_power8_pmu_user(CPUPPCState *env)
&spr_read_ureg, SPR_NOACCESS,
&spr_read_ureg, &spr_write_ureg,
0x00000000);
+ spr_register(env, SPR_POWER_USIER, "USIER",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
}
static void gen_spr_power5p_ear(CPUPPCState *env)
@@ -7713,10 +7874,10 @@ static void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
static void gen_spr_power8_tce_address_control(CPUPPCState *env)
{
- spr_register(env, SPR_TAR, "TAR",
- &spr_read_tar, &spr_write_tar,
- &spr_read_generic, &spr_write_generic,
- 0x00000000);
+ spr_register_kvm(env, SPR_TAR, "TAR",
+ &spr_read_tar, &spr_write_tar,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TAR, 0x00000000);
}
static void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
@@ -7841,6 +8002,44 @@ static void gen_spr_power8_fscr(CPUPPCState *env)
KVM_REG_PPC_FSCR, initval);
}
+static void gen_spr_power8_pspb(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_PSPB, "PSPB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic32,
+ KVM_REG_PPC_PSPB, 0);
+}
+
+static void gen_spr_power8_ic(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_IC, "IC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+#endif
+}
+
+static void gen_spr_power8_book4(CPUPPCState *env)
+{
+ /* Add a number of P8 book4 registers */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_ACOP, "ACOP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_ACOP, 0);
+ spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PID, 0);
+ spr_register_kvm(env, SPR_WORT, "WORT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_WORT, 0);
+#endif
+}
+
static void init_proc_book3s_64(CPUPPCState *env, int version)
{
gen_spr_ne_601(env);
@@ -7862,8 +8061,9 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
case BOOK3S_CPU_POWER7:
case BOOK3S_CPU_POWER8:
gen_spr_book3s_ids(env);
- gen_spr_amr(env);
+ gen_spr_amr(env, version >= BOOK3S_CPU_POWER8);
gen_spr_book3s_purr(env);
+ env->ci_large_pages = true;
break;
default:
g_assert_not_reached();
@@ -7890,10 +8090,15 @@ static void init_proc_book3s_64(CPUPPCState *env, int version)
gen_spr_power8_pmu_sup(env);
gen_spr_power8_pmu_user(env);
gen_spr_power8_tm(env);
+ gen_spr_power8_pspb(env);
gen_spr_vtb(env);
+ gen_spr_power8_ic(env);
+ gen_spr_power8_book4(env);
}
if (version < BOOK3S_CPU_POWER8) {
gen_spr_book3s_dbg(env);
+ } else {
+ gen_spr_book3s_207_dbg(env);
}
#if !defined(CONFIG_USER_ONLY)
switch (version) {
@@ -8019,7 +8224,7 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
(1ull << MSR_DR) |
(1ull << MSR_PMM) |
(1ull << MSR_RI);
- pcc->mmu_model = POWERPC_MMU_64B;
+ pcc->mmu_model = POWERPC_MMU_2_03;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
#endif
@@ -8033,8 +8238,8 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x10000;
}
-static void powerpc_get_compat(Object *obj, Visitor *v,
- void *opaque, const char *name, Error **errp)
+static void powerpc_get_compat(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
char *value = (char *)"";
Property *prop = opaque;
@@ -8058,18 +8263,18 @@ static void powerpc_get_compat(Object *obj, Visitor *v,
break;
}
- visit_type_str(v, &value, name, errp);
+ visit_type_str(v, name, &value, errp);
}
-static void powerpc_set_compat(Object *obj, Visitor *v,
- void *opaque, const char *name, Error **errp)
+static void powerpc_set_compat(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
Error *error = NULL;
char *value = NULL;
Property *prop = opaque;
uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop);
- visit_type_str(v, &value, name, &error);
+ visit_type_str(v, name, &value, &error);
if (error) {
error_propagate(errp, error);
return;
@@ -8103,6 +8308,36 @@ static Property powerpc_servercpu_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
+#ifdef CONFIG_SOFTMMU
+static const struct ppc_segment_page_sizes POWER7_POWER8_sps = {
+ .sps = {
+ {
+ .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 },
+ { .page_shift = 16, .pte_enc = 0x7 },
+ { .page_shift = 24, .pte_enc = 0x38 }, },
+ },
+ {
+ .page_shift = 16, /* 64K */
+ .slb_enc = SLB_VSID_64K,
+ .enc = { { .page_shift = 16, .pte_enc = 0x1 },
+ { .page_shift = 24, .pte_enc = 0x8 }, },
+ },
+ {
+ .page_shift = 24, /* 16M */
+ .slb_enc = SLB_VSID_16M,
+ .enc = { { .page_shift = 24, .pte_enc = 0 }, },
+ },
+ {
+ .page_shift = 34, /* 16G */
+ .slb_enc = SLB_VSID_16G,
+ .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
+ },
+ }
+};
+#endif /* CONFIG_SOFTMMU */
+
static void init_proc_POWER7 (CPUPPCState *env)
{
init_proc_book3s_64(env, BOOK3S_CPU_POWER7);
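POWER7_POWER8_sps above lists, for each supported segment base page size, which actual page sizes a hash PTE may hold and with what encoding. A hypothetical lookup (find_pte_enc is illustrative only, assuming the struct ppc_segment_page_sizes arrays are bounded by PPC_PAGE_SIZES_MAX_SZ and zero-filled when unused) shows how such a table is consumed:

    static int find_pte_enc(const struct ppc_segment_page_sizes *sps,
                            int seg_page_shift, int page_shift)
    {
        int i, j;

        for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
            if (sps->sps[i].page_shift != seg_page_shift) {
                continue;
            }
            for (j = 0; j < PPC_PAGE_SIZES_MAX_SZ; j++) {
                if (sps->sps[i].enc[j].page_shift == page_shift) {
                    return sps->sps[i].enc[j].pte_enc;
                }
            }
        }
        return -1;   /* combination not supported */
    }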
@@ -8166,6 +8401,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ pcc->sps = &POWER7_POWER8_sps;
#endif
pcc->excp_model = POWERPC_EXCP_POWER7;
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
@@ -8186,6 +8422,9 @@ static void init_proc_POWER8(CPUPPCState *env)
static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr)
{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) {
+ return true;
+ }
if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) {
return true;
}
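ppc_pvr_match_power8() above now also accepts POWER8NVL parts. Each test follows the same shape; written generically (pvr_matches_family is hypothetical, reusing the CPU_POWERPC_POWER_SERVER_MASK seen above):

    static bool pvr_matches_family(uint32_t pvr, uint32_t family_base)
    {
        /* the server mask keeps the family bits and discards the revision */
        return (pvr & CPU_POWERPC_POWER_SERVER_MASK) == family_base;
    }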
@@ -8243,11 +8482,12 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
- pcc->mmu_model = POWERPC_MMU_2_06;
+ pcc->mmu_model = POWERPC_MMU_2_07;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ pcc->sps = &POWER7_POWER8_sps;
#endif
- pcc->excp_model = POWERPC_EXCP_POWER7;
+ pcc->excp_model = POWERPC_EXCP_POWER8;
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
@@ -8258,8 +8498,33 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
}
-#endif /* defined (TARGET_PPC64) */
+#if !defined(CONFIG_USER_ONLY)
+
+void cpu_ppc_set_papr(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_spr_t *amor = &env->spr_cb[SPR_AMOR];
+
+ /* PAPR always has exception vectors in RAM not ROM. To ensure this,
+ * MSR[IP] should never be set.
+ *
+ * We also disallow setting of MSR_HV
+ */
+ env->msr_mask &= ~((1ull << MSR_EP) | MSR_HVB);
+
+ /* Set a full AMOR so guest can use the AMR as it sees fit */
+ env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;
+
+ /* Tell KVM that we're in PAPR mode */
+ if (kvm_enabled()) {
+ kvmppc_set_papr(cpu);
+ }
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+#endif /* defined (TARGET_PPC64) */
/*****************************************************************************/
/* Generic CPU instantiation routine */
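cpu_ppc_set_papr() above is intended to be called by a PAPR machine once the vCPU exists. An assert-style sketch of its net effect (illustrative only, assuming a realized PowerPCCPU *cpu):

    CPUPPCState *env = &cpu->env;

    cpu_ppc_set_papr(cpu);
    /* the guest can no longer set MSR[EP] or MSR[HV]... */
    assert(!(env->msr_mask & ((1ull << MSR_EP) | MSR_HVB)));
    /* ...and AMOR is left fully permissive for later AMR updates */
    assert(env->spr[SPR_AMOR] == 0xffffffffffffffffull);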
@@ -8470,8 +8735,6 @@ static void dump_ppc_sprs (CPUPPCState *env)
#endif
/*****************************************************************************/
-#include <stdlib.h>
-#include <string.h>
/* Opcode types */
enum {
@@ -8577,11 +8840,7 @@ static int register_ind_insn (opc_handler_t **ppc_opcodes,
unsigned char idx1, unsigned char idx2,
opc_handler_t *handler)
{
- int ret;
-
- ret = register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
-
- return ret;
+ return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
}
static int register_dblind_insn (opc_handler_t **ppc_opcodes,
@@ -8754,14 +9013,25 @@ static void dump_ppc_insns (CPUPPCState *env)
}
#endif
+static bool avr_need_swap(CPUPPCState *env)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ return msr_le;
+#else
+ return !msr_le;
+#endif
+}
+
static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
stfq_p(mem_buf, env->fpr[n]);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
return 8;
}
if (n == 32) {
stl_p(mem_buf, env->fpscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
return 0;
@@ -8770,10 +9040,12 @@ static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
env->fpr[n] = ldfq_p(mem_buf);
return 8;
}
if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff);
return 4;
}
@@ -8783,21 +9055,25 @@ static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
-#ifdef HOST_WORDS_BIGENDIAN
- stq_p(mem_buf, env->avr[n].u64[0]);
- stq_p(mem_buf+8, env->avr[n].u64[1]);
-#else
- stq_p(mem_buf, env->avr[n].u64[1]);
- stq_p(mem_buf+8, env->avr[n].u64[0]);
-#endif
+ if (!avr_need_swap(env)) {
+ stq_p(mem_buf, env->avr[n].u64[0]);
+ stq_p(mem_buf+8, env->avr[n].u64[1]);
+ } else {
+ stq_p(mem_buf, env->avr[n].u64[1]);
+ stq_p(mem_buf+8, env->avr[n].u64[0]);
+ }
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf + 8, 8);
return 16;
}
if (n == 32) {
stl_p(mem_buf, env->vscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
if (n == 33) {
stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
return 0;
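The AVR gdb accessors above now deal with endianness in two layers: avr_need_swap() decides whether the two u64 halves of a vector register have to be exchanged (host storage order versus guest MSR[LE]), while ppc_maybe_bswap_register() fixes the byte order inside each datum. The latter is not defined in this diff; judging from its call sites it presumably behaves roughly like:

    void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
    {
        /* gdb register images get byte-swapped only for little-endian guests */
        if (msr_le) {
            if (len == 4) {
                bswap32s((uint32_t *)mem_buf);
            } else if (len == 8) {
                bswap64s((uint64_t *)mem_buf);
            }
        }
    }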
@@ -8806,20 +9082,24 @@ static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
-#ifdef HOST_WORDS_BIGENDIAN
- env->avr[n].u64[0] = ldq_p(mem_buf);
- env->avr[n].u64[1] = ldq_p(mem_buf+8);
-#else
- env->avr[n].u64[1] = ldq_p(mem_buf);
- env->avr[n].u64[0] = ldq_p(mem_buf+8);
-#endif
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf + 8, 8);
+ if (!avr_need_swap(env)) {
+ env->avr[n].u64[0] = ldq_p(mem_buf);
+ env->avr[n].u64[1] = ldq_p(mem_buf+8);
+ } else {
+ env->avr[n].u64[1] = ldq_p(mem_buf);
+ env->avr[n].u64[0] = ldq_p(mem_buf+8);
+ }
return 16;
}
if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
env->vscr = ldl_p(mem_buf);
return 4;
}
if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf);
return 4;
}
@@ -8831,6 +9111,7 @@ static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
if (n < 32) {
#if defined(TARGET_PPC64)
stl_p(mem_buf, env->gpr[n] >> 32);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
#else
stl_p(mem_buf, env->gprh[n]);
#endif
@@ -8838,10 +9119,12 @@ static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
}
if (n == 32) {
stq_p(mem_buf, env->spe_acc);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
return 8;
}
if (n == 33) {
stl_p(mem_buf, env->spe_fscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
return 4;
}
return 0;
@@ -8852,7 +9135,11 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
if (n < 32) {
#if defined(TARGET_PPC64)
target_ulong lo = (uint32_t)env->gpr[n];
- target_ulong hi = (target_ulong)ldl_p(mem_buf) << 32;
+ target_ulong hi;
+
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+
+ hi = (target_ulong)ldl_p(mem_buf) << 32;
env->gpr[n] = lo | hi;
#else
env->gprh[n] = ldl_p(mem_buf);
@@ -8860,16 +9147,38 @@ static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
return 4;
}
if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
env->spe_acc = ldq_p(mem_buf);
return 8;
}
if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
env->spe_fscr = ldl_p(mem_buf);
return 4;
}
return 0;
}
+static int gdb_get_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ stq_p(mem_buf, env->vsr[n]);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ return 8;
+ }
+ return 0;
+}
+
+static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ env->vsr[n] = ldq_p(mem_buf);
+ return 8;
+ }
+ return 0;
+}
+
static int ppc_fixup_cpu(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
@@ -8975,6 +9284,10 @@ static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
34, "power-spe.xml", 0);
}
+ if (pcc->insns_flags2 & PPC2_VSX) {
+ gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
+ 32, "power-vsx.xml", 0);
+ }
qemu_init_vcpu(cs);
@@ -9189,7 +9502,7 @@ int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
return ret;
}
-int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
+void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp)
{
int ret = 0;
CPUPPCState *env = &cpu->env;
@@ -9211,12 +9524,13 @@ int ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
break;
}
- if (kvm_enabled() && kvmppc_set_compat(cpu, cpu->cpu_version) < 0) {
- error_report("Unable to set compatibility mode in KVM");
- ret = -1;
+ if (kvm_enabled()) {
+ ret = kvmppc_set_compat(cpu, cpu->cpu_version);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "Unable to set CPU compatibility mode in KVM");
+ }
}
-
- return ret;
}
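ppc_set_compat() above now reports failure through an Error ** instead of a return code. A typical caller under the new signature (illustrative, inside a function that itself takes an Error **errp, using the usual QEMU error-propagation idiom):

    Error *local_err = NULL;

    ppc_set_compat(cpu, cpu_version, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }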
static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b)
@@ -9306,7 +9620,6 @@ static gint ppc_cpu_compare_class_name(gconstpointer a, gconstpointer b)
return -1;
}
-#include <ctype.h>
static ObjectClass *ppc_cpu_class_by_name(const char *name);
@@ -9350,8 +9663,7 @@ static ObjectClass *ppc_cpu_class_by_name(const char *name)
break;
}
if (i == 8) {
- ret = OBJECT_CLASS(ppc_cpu_class_by_pvr(strtoul(name, NULL, 16)));
- return ret;
+ return OBJECT_CLASS(ppc_cpu_class_by_pvr(strtoul(name, NULL, 16)));
}
}
@@ -9582,7 +9894,7 @@ static void ppc_cpu_reset(CPUState *s)
#if defined(TARGET_PPC64)
if (env->mmu_model & POWERPC_MMU_64) {
- env->msr |= (1ULL << MSR_SF);
+ msr |= (1ULL << MSR_SF);
}
#endif
@@ -9685,6 +9997,15 @@ static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
return pcc->pvr == pvr;
}
+static gchar *ppc_gdb_arch_name(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ return g_strdup("powerpc:common64");
+#else
+ return g_strdup("powerpc:common");
+#endif
+}
+
static void ppc_cpu_class_init(ObjectClass *oc, void *data)
{
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
@@ -9716,7 +10037,6 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->vmsd = &vmstate_ppc_cpu;
#if defined(TARGET_PPC64)
cc->write_elf64_note = ppc64_cpu_write_elf64_note;
- cc->write_elf64_qemunote = ppc64_cpu_write_elf64_qemunote;
#endif
#endif
cc->cpu_exec_enter = ppc_cpu_exec_enter;
@@ -9729,6 +10049,7 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
cc->gdb_num_core_regs = 71 + 32;
#endif
+ cc->gdb_arch_name = ppc_gdb_arch_name;
#if defined(TARGET_PPC64)
cc->gdb_core_xml_file = "power64-core.xml";
#else
diff --git a/qemu/target-ppc/user_only_helper.c b/qemu/target-ppc/user_only_helper.c
index 829f66f50..6aff34713 100644
--- a/qemu/target-ppc/user_only_helper.c
+++ b/qemu/target-ppc/user_only_helper.c
@@ -18,6 +18,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,